'''
A new backdoor attack in CNNs by training set corruption without label poisoning

This script implements the SIG attack.

Note that in our implementation the poisoning ratio is ALWAYS defined as
(number of poisoned samples) / (number of all samples), NOT as
(number of poisoned samples) / (number of samples in the target class)!

@inproceedings{SIG,
    title     = {A new backdoor attack in CNNs by training set corruption without label poisoning},
    author    = {Barni, Mauro and Kallas, Kassem and Tondi, Benedetta},
    booktitle = {2019 IEEE International Conference on Image Processing},
    year      = 2019,
}

Basic structure:
1. config args, save_path, fix random seed
2. set the clean train data and clean test data
3. set the attack img transform and label transform
4. set the backdoor attack data and backdoor test data
5. set the device, model, criterion, optimizer, training schedule
6. attack, or use the model to fine-tune with 5% clean data
7. save the attack result for defense
'''
import argparse
import logging
import os
import sys
from copy import deepcopy

import numpy as np
import torch

sys.path = ["./"] + sys.path  # make repo-local imports resolvable when run from the repo root

from attack.badnet import BadNet, add_common_attack_args
from utils.backdoor_generate_poison_index import generate_poison_index_from_label_transform
from utils.aggregate_block.bd_attack_generate import bd_attack_img_trans_generate, bd_attack_label_trans_generate
from utils.bd_dataset_v2 import prepro_cls_DatasetBD_v2, dataset_wrapper_with_transform
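
# --- Illustrative sketch (an addition for exposition, not part of the original pipeline) ---
# The SIG trigger of Barni et al. overlays a horizontal sinusoid on the image:
#     v(i, j) = delta * sin(2 * pi * j * f / m)
# where m is the image width, f is the frequency (the --sig_f argument below),
# and delta is the amplitude. The transform actually used by this script is
# built by bd_attack_img_trans_generate from the yaml config; this helper only
# demonstrates the idea on a single HWC uint8 image.
def _sig_trigger_sketch(img, delta=20.0, f=6.0):
    """Overlay v(i, j) = delta * sin(2*pi*j*f/m) on an HWC uint8 image (illustrative only)."""
    w = img.shape[1]
    j = np.arange(w, dtype=np.float32)
    signal = delta * np.sin(2 * np.pi * j * f / w)          # one value per column
    out = img.astype(np.float32) + signal[None, :, None]    # broadcast over rows and channels
    return np.clip(out, 0, 255).astype(np.uint8)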


class SIG(BadNet):

    def set_bd_args(cls, parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        parser = add_common_attack_args(parser)
        parser.add_argument("--sig_f", type=float,
                            help="frequency of the sinusoidal trigger signal")
        parser.add_argument('--bd_yaml_path', type=str, default='./config/attack/sig/default.yaml',
                            help='path to the yaml file providing additional default attributes')
        return parser

    def stage1_non_training_data_prepare(self):
        logging.info("stage1 start")
        assert 'args' in self.__dict__
        args = self.args

        ### 2. set the clean train data and clean test data
        (train_dataset_without_transform,
         train_img_transform,
         train_label_transform,
         test_dataset_without_transform,
         test_img_transform,
         test_label_transform,
         clean_train_dataset_with_transform,
         clean_train_dataset_targets,
         clean_test_dataset_with_transform,
         clean_test_dataset_targets,
         ) = self.benign_prepare()

        ### 3. set the attack img transform and label transform
        train_bd_img_transform, test_bd_img_transform = bd_attack_img_trans_generate(args)
        ### get the backdoor transform on label
        bd_label_transform = bd_attack_label_trans_generate(args)

        ### 4. set the backdoor attack data and backdoor test data
        train_poison_index = generate_poison_index_from_label_transform(
            clean_train_dataset_targets,
            label_transform=bd_label_transform,
            train=True,
            pratio=args.pratio if 'pratio' in args.__dict__ else None,
            p_num=args.p_num if 'p_num' in args.__dict__ else None,
            clean_label=True,
        )
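        # With clean_label=True, only samples that already belong to the attack
        # target class receive the trigger; no label is changed. This is the
        # defining property of SIG as a clean-label attack, and it is why the
        # poisoning ratio is computed over the whole training set (see the
        # module docstring).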

        logging.debug("poison train idx is saved")
        torch.save(train_poison_index,
                   args.save_path + '/train_poison_index_list.pickle',
                   )
        ### generate train dataset for backdoor attack
        bd_train_dataset = prepro_cls_DatasetBD_v2(
            deepcopy(train_dataset_without_transform),
            poison_indicator=train_poison_index,
            bd_image_pre_transform=train_bd_img_transform,
            bd_label_pre_transform=bd_label_transform,
            save_folder_path=f"{args.save_path}/bd_train_dataset",
        )
        bd_train_dataset_with_transform = dataset_wrapper_with_transform(
            bd_train_dataset,
            train_img_transform,
            train_label_transform,
        )
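        # Note (a reading of the pipeline, not original commentary): the
        # prepro_cls_DatasetBD_v2 wrapper applies the backdoor pre-transforms up
        # front and keeps the poisoned samples under save_folder_path, while
        # dataset_wrapper_with_transform layers the ordinary train-time
        # image/label transforms on top at access time.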

        ### decide which images to poison for the ASR test
        test_poison_index = generate_poison_index_from_label_transform(
            clean_test_dataset_targets,
            label_transform=bd_label_transform,
            train=False,
        )
        ### generate test dataset for ASR
        bd_test_dataset = prepro_cls_DatasetBD_v2(
            deepcopy(test_dataset_without_transform),
            poison_indicator=test_poison_index,
            bd_image_pre_transform=test_bd_img_transform,
            bd_label_pre_transform=bd_label_transform,
            save_folder_path=f"{args.save_path}/bd_test_dataset",
        )
        # keep only the poisoned samples, so ASR is measured on triggered images only
        bd_test_dataset.subset(
            np.where(test_poison_index == 1)[0]
        )
        bd_test_dataset_with_transform = dataset_wrapper_with_transform(
            bd_test_dataset,
            test_img_transform,
            test_label_transform,
        )

        self.stage1_results = (clean_train_dataset_with_transform,
                               clean_test_dataset_with_transform,
                               bd_train_dataset_with_transform,
                               bd_test_dataset_with_transform)


if __name__ == '__main__':
    attack = SIG()

    parser = argparse.ArgumentParser(description=sys.argv[0])
    parser = attack.set_args(parser)
    parser = attack.set_bd_args(parser)
    args = parser.parse_args()

    attack.add_bd_yaml_to_args(args)
    attack.add_yaml_to_args(args)
    args = attack.process_args(args)

    attack.prepare(args)
    attack.stage1_non_training_data_prepare()
    attack.stage2_training()
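
# Example invocation (illustrative; --sig_f and --bd_yaml_path are defined in
# set_bd_args above, the remaining flags come from the BadNet base class and
# add_common_attack_args, so adjust names and paths to your checkout):
#   python ./attack/sig.py --sig_f 6 --pratio 0.1 \
#       --bd_yaml_path ./config/attack/sig/default.yaml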