mirror of https://github.com/Shawn-Shan/fawkes.git
add debug option and progbar
commit 6fe3a7c3fd, parent 5b013a01fc
@@ -4,17 +4,18 @@
# @Link    : https://www.shawnshan.com/

__version__ = '0.0.8'
__version__ = '0.0.9'

from .detect_faces import create_mtcnn, run_detect_face
from .differentiator import FawkesMaskGeneration
from .protection import main, Fawkes
from .utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, get_file
from .utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, get_file, \
    filter_image_paths

__all__ = (
    '__version__', 'create_mtcnn', 'run_detect_face',
    'FawkesMaskGeneration', 'load_extractor',
    'init_gpu',
    'select_target_label', 'dump_image', 'reverse_process_cloaked',
    'Faces', 'get_file', 'main', 'Fawkes'
    'Faces', 'get_file', 'filter_image_paths', 'main', 'Fawkes'
)
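Note (not part of the diff): with filter_image_paths re-exported and added to __all__, the helper becomes importable from the package root. A minimal usage sketch; the glob pattern and directory are illustrative only:

import glob
from fawkes import filter_image_paths

candidates = glob.glob("imgs/*")                              # may include non-image files
image_paths, loaded_images = filter_image_paths(candidates)   # keeps only paths that load as images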
@@ -10,8 +10,8 @@ from decimal import Decimal

import numpy as np
import tensorflow as tf

from fawkes.utils import preprocess, reverse_preprocess
from keras.utils import Progbar


class FawkesMaskGeneration:
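Note (not part of the diff): keras.utils.Progbar is the text progress bar used later when verbose is 0. A minimal sketch of its API; the step count and sleep are illustrative stand-ins for real work:

import time
from keras.utils import Progbar

bar = Progbar(50, width=30, verbose=1)    # target=50 total steps, 30-character bar
for step in range(50):
    time.sleep(0.01)                       # stand-in for one optimization step
    bar.update(step + 1)                   # advance the bar to the current step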
@@ -226,8 +226,6 @@ class FawkesMaskGeneration:

        self.init = tf.variables_initializer(var_list=[self.modifier] + new_vars)

        print('Attacker loaded')

    def preprocess_arctanh(self, imgs):

        imgs = reverse_preprocess(imgs, self.intensity_range)
@@ -276,7 +274,7 @@ class FawkesMaskGeneration:
            adv_imgs.extend(adv_img)

        elapsed_time = time.time() - start_time
        print('attack cost %f s' % (elapsed_time))
        print('protection cost %f s' % (elapsed_time))

        return np.array(adv_imgs)
@@ -356,84 +354,70 @@ class FawkesMaskGeneration:
                             bottlesim_sum / nb_imgs))

        finished_idx = set()
        try:
            total_distance = [0] * nb_imgs
        total_distance = [0] * nb_imgs

        if self.limit_dist:
            dist_raw_list, bottlesim_list, aimg_input_list = self.sess.run(
                [self.dist_raw,
                 self.bottlesim,
                 self.aimg_input])
            for e, (dist_raw, bottlesim, aimg_input) in enumerate(
                    zip(dist_raw_list, bottlesim_list, aimg_input_list)):
                if e >= nb_imgs:
                    break
                total_distance[e] = bottlesim

        for iteration in range(self.MAX_ITERATIONS):

            self.sess.run([self.train], feed_dict={self.learning_rate_holder: LR})

            dist_raw_list, bottlesim_list, aimg_input_list = self.sess.run(
                [self.dist_raw,
                 self.bottlesim,
                 self.aimg_input])

            all_clear = True
            for e, (dist_raw, bottlesim, aimg_input) in enumerate(
                    zip(dist_raw_list, bottlesim_list, aimg_input_list)):

                if e in finished_idx:
                    continue

                if e >= nb_imgs:
                    break
                if (bottlesim < best_bottlesim[e] and bottlesim > total_distance[e] * 0.1 and (
                        not self.maximize)) or (
                        bottlesim > best_bottlesim[e] and self.maximize):
                    best_bottlesim[e] = bottlesim
                    best_adv[e] = aimg_input

                # if iteration > 20 and (dist_raw >= self.l_threshold or iteration == self.MAX_ITERATIONS - 1):
                # finished_idx.add(e)
                # print("{} finished at dist {}".format(e, dist_raw))
                # best_bottlesim[e] = bottlesim
                # best_adv[e] = aimg_input
                #
                all_clear = False

            if all_clear:
            if self.limit_dist:
                dist_raw_list, bottlesim_list, aimg_input_list = self.sess.run(
                    [self.dist_raw,
                     self.bottlesim,
                     self.aimg_input])
                for e, (dist_raw, bottlesim, aimg_input) in enumerate(
                        zip(dist_raw_list, bottlesim_list, aimg_input_list)):
                    if e >= nb_imgs:
                        break
                    total_distance[e] = bottlesim

            if iteration != 0 and iteration % (self.MAX_ITERATIONS // 2) == 0:
                LR = LR * 0.8
                print("Learning Rate: ", LR)
        if self.verbose == 0:
            progressbar = Progbar(
                self.MAX_ITERATIONS, width=30, verbose=1
            )

            if iteration % (self.MAX_ITERATIONS // 5) == 0:
                if self.verbose == 1:
                    dist_raw_sum = float(self.sess.run(self.dist_raw_sum))
                    bottlesim_sum = self.sess.run(self.bottlesim_sum)
                    print('ITER %4d perturb: %.5f; sim: %f'
                          % (iteration, dist_raw_sum / nb_imgs, bottlesim_sum / nb_imgs))
        for iteration in range(self.MAX_ITERATIONS):

            # protected_images = aimg_input_list
            #
            # orginal_images = np.copy(self.faces.cropped_faces)
            # cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(
            # orginal_images)
            # final_images = self.faces.merge_faces(cloak_perturbation)
            #
            # for p_img, img in zip(protected_images, final_images):
            # dump_image(reverse_process_cloaked(p_img),
            # "/home/shansixioing/fawkes/data/emily/emily_cloaked_cropped{}.png".format(iteration),
            # format='png')
            #
            # dump_image(img,
            # "/home/shansixioing/fawkes/data/emily/emily_cloaked_{}.png".format(iteration),
            # format='png')
            self.sess.run([self.train], feed_dict={self.learning_rate_holder: LR})

        except KeyboardInterrupt:
            pass

            dist_raw_list, bottlesim_list, aimg_input_list = self.sess.run(
                [self.dist_raw,
                 self.bottlesim,
                 self.aimg_input])

            all_clear = True
            for e, (dist_raw, bottlesim, aimg_input) in enumerate(
                    zip(dist_raw_list, bottlesim_list, aimg_input_list)):

                if e in finished_idx:
                    continue

                if e >= nb_imgs:
                    break
                if (bottlesim < best_bottlesim[e] and bottlesim > total_distance[e] * 0.1 and (
                        not self.maximize)) or (
                        bottlesim > best_bottlesim[e] and self.maximize):
                    best_bottlesim[e] = bottlesim
                    best_adv[e] = aimg_input

                # if iteration > 20 and (dist_raw >= self.l_threshold or iteration == self.MAX_ITERATIONS - 1):
                # finished_idx.add(e)
                # print("{} finished at dist {}".format(e, dist_raw))
                # best_bottlesim[e] = bottlesim
                # best_adv[e] = aimg_input
                #
                all_clear = False

            if all_clear:
                break

            if iteration != 0 and iteration % (self.MAX_ITERATIONS // 2) == 0:
                LR = LR * 0.8

            if iteration % (self.MAX_ITERATIONS // 5) == 0:
                if self.verbose == 1:
                    dist_raw_sum = float(self.sess.run(self.dist_raw_sum))
                    bottlesim_sum = self.sess.run(self.bottlesim_sum)
                    print('ITER %4d perturb: %.5f; sim: %f'
                          % (iteration, dist_raw_sum / nb_imgs, bottlesim_sum / nb_imgs))
            if self.verbose == 0:
                progressbar.update(iteration)

        if self.verbose == 1:
            loss_sum = float(self.sess.run(self.loss_sum))
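Note (not part of the diff): the net effect of the reworked loop is that self.verbose now selects the reporting style — 1 keeps the periodic 'ITER ...' prints, 0 builds a Progbar before the loop and advances it every iteration. A stripped-down sketch of just that gating, with the optimization step omitted:

from keras.utils import Progbar

MAX_ITERATIONS, verbose = 100, 0           # verbose=0 is the quiet default (progress bar)

if verbose == 0:
    progressbar = Progbar(MAX_ITERATIONS, width=30, verbose=1)

for iteration in range(MAX_ITERATIONS):
    # ... one gradient step would run here ...
    if iteration % (MAX_ITERATIONS // 5) == 0 and verbose == 1:
        print('ITER %4d ...' % iteration)   # debug-style reporting
    if verbose == 0:
        progressbar.update(iteration)       # quiet run: only the bar moves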
@@ -445,7 +429,6 @@ class FawkesMaskGeneration:
                     dist_sum,
                     dist_raw_sum,
                     bottlesim_sum / nb_imgs))

        print("\n")
        best_adv = self.clipping(best_adv[:nb_imgs])

        return best_adv
@@ -17,15 +17,17 @@ logging.getLogger('tensorflow').disabled = True
import numpy as np
from fawkes.differentiator import FawkesMaskGeneration
from fawkes.utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, \
    Faces
    Faces, filter_image_paths

from fawkes.align_face import aligner
from fawkes.utils import get_file

random.seed(12243)
np.random.seed(122412)


def generate_cloak_images(sess, feature_extractors, image_X, target_emb=None, th=0.01, faces=None, sd=1e9, lr=2,
                          max_step=500, batch_size=1):
                          max_step=500, batch_size=1, debug=False):
    batch_size = batch_size if len(image_X) > batch_size else len(image_X)

    differentiator = FawkesMaskGeneration(sess, feature_extractors,
@@ -36,7 +38,7 @@ def generate_cloak_images(sess, feature_extractors, image_X, target_emb=None, th
                                          learning_rate=lr,
                                          max_iterations=max_step,
                                          l_threshold=th,
                                          verbose=1, maximize=False, keep_final=False, image_shape=image_X.shape[1:],
                                          verbose=1 if debug else 0, maximize=False, keep_final=False, image_shape=image_X.shape[1:],
                                          faces=faces)

    cloaked_image_X = differentiator.attack(image_X, target_emb)
@@ -55,13 +57,14 @@ def check_imgs(imgs):

class Fawkes(object):
    def __init__(self, feature_extractor, gpu, batch_size):
        global graph
        graph = tf.get_default_graph()

        self.feature_extractor = feature_extractor
        self.gpu = gpu
        self.batch_size = batch_size
        self.sess = init_gpu(gpu)
        global sess
        sess = init_gpu(gpu)
        global graph
        graph = tf.get_default_graph()

        model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
        if not os.path.exists(os.path.join(model_dir, "mtcnn.p.gz")):
@@ -69,11 +72,11 @@ class Fawkes(object):
            get_file("mtcnn.p.gz", "http://sandlab.cs.uchicago.edu/fawkes/files/mtcnn.p.gz", cache_dir=model_dir,
                     cache_subdir='')

        self.aligner = aligner(self.sess)
        self.fs_names = [feature_extractor]
        if isinstance(feature_extractor, list):
            self.fs_names = feature_extractor

        self.aligner = aligner(sess)
        self.feature_extractors_ls = [load_extractor(name) for name in self.fs_names]

    def mode2param(self, mode):
@@ -101,48 +104,52 @@ class Fawkes(object):
        return th, max_step, lr

    def run_protection(self, image_paths, mode='mid', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png',
                       separate_target=True):
                       separate_target=True, debug=False):

        if mode == 'custom':
            pass
        else:
            th, max_step, lr = self.mode2param(mode)

        image_paths, loaded_images = filter_image_paths(image_paths)

        start_time = time.time()

        if not image_paths:
            raise Exception("No images in the directory")
        with graph.as_default():
            faces = Faces(image_paths, self.aligner, verbose=1)
            faces = Faces(image_paths, loaded_images, self.aligner, verbose=1)

            original_images = faces.cropped_faces
            original_images = np.array(original_images)

            if separate_target:
                target_embedding = []
                for org_img in original_images:
                    org_img = org_img.reshape([1] + list(org_img.shape))
                    tar_emb = select_target_label(org_img, self.feature_extractors_ls, self.fs_names)
                    target_embedding.append(tar_emb)
                target_embedding = np.concatenate(target_embedding)
            else:
                target_embedding = select_target_label(original_images, self.feature_extractors_ls, self.fs_names)
            with sess.as_default():
                if separate_target:
                    target_embedding = []
                    for org_img in original_images:
                        org_img = org_img.reshape([1] + list(org_img.shape))
                        tar_emb = select_target_label(org_img, self.feature_extractors_ls, self.fs_names)
                        target_embedding.append(tar_emb)
                    target_embedding = np.concatenate(target_embedding)
                else:
                    target_embedding = select_target_label(original_images, self.feature_extractors_ls, self.fs_names)

                protected_images = generate_cloak_images(self.sess, self.feature_extractors_ls, original_images,
                                                         target_emb=target_embedding, th=th, faces=faces, sd=sd,
                                                         lr=lr, max_step=max_step, batch_size=batch_size)
                protected_images = generate_cloak_images(sess, self.feature_extractors_ls, original_images,
                                                         target_emb=target_embedding, th=th, faces=faces, sd=sd,
                                                         lr=lr, max_step=max_step, batch_size=batch_size, debug=debug)

                faces.cloaked_cropped_faces = protected_images
                faces.cloaked_cropped_faces = protected_images

                cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(original_images)
                final_images = faces.merge_faces(cloak_perturbation)
                cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(
                    original_images)
                final_images = faces.merge_faces(cloak_perturbation)

            for p_img, cloaked_img, path in zip(final_images, protected_images, image_paths):
                file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, format)
                dump_image(p_img, file_name, format=format)

            elapsed_time = time.time() - start_time
            print('attack cost %f s' % elapsed_time)
            print("Done!")
        for p_img, path in zip(final_images, image_paths):
            file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, format)
            dump_image(p_img, file_name, format=format)
        # elapsed_time = time.time() - start_time
        print("Done!")
        return None


def main(*argv):
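Note (not part of the diff): a hedged sketch of driving the updated Python API end to end. The extractor name, GPU id, and directory are placeholders rather than values taken from this commit:

import glob
from fawkes import Fawkes

protector = Fawkes("extractor_2", gpu="0", batch_size=1)   # "extractor_2" is an assumed model name
protector.run_protection(glob.glob("imgs/*"),               # mixed files are fine: filter_image_paths drops non-images
                         mode='mid',
                         format='png',
                         separate_target=False,
                         debug=True)                        # new flag: ITER prints instead of the progress bar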
@@ -175,6 +182,7 @@ def main(*argv):

    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument('--separate_target', action='store_true')
    parser.add_argument('--debug', action='store_true')

    parser.add_argument('--format', type=str,
                        help="final image format",
@@ -192,7 +200,7 @@ def main(*argv):
    protector = Fawkes(args.feature_extractor, args.gpu, args.batch_size)
    protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr, max_step=args.max_step,
                             batch_size=args.batch_size, format=args.format,
                             separate_target=args.separate_target)
                             separate_target=args.separate_target, debug=args.debug)


if __name__ == '__main__':
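Note (not part of the diff): on the command line, --debug is simply forwarded to run_protection. A hedged sketch of exercising it through main(); the --directory option and the leading program-name argument are assumptions about the rest of main(), which this hunk does not show:

from fawkes.protection import main

# roughly equivalent to: python3 protection.py --directory imgs/ --debug
main("protection.py", "--directory", "imgs/", "--debug")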
@@ -55,6 +55,7 @@ if sys.version_info[0] == 2:
else:
    from six.moves.urllib.request import urlretrieve


def clip_img(X, preprocessing='raw'):
    X = reverse_preprocess(X, preprocessing)
    X = np.clip(X, 0.0, 255.0)
@@ -67,6 +68,8 @@ def load_image(path):
        img = Image.open(path)
    except PIL.UnidentifiedImageError:
        return None
    except IsADirectoryError:
        return None

    if img._getexif() is not None:
        for orientation in ExifTags.TAGS.keys():
@@ -89,9 +92,24 @@ def load_image(path):
    return image_array


class Faces(object):
    def __init__(self, image_paths, aligner, verbose=1, eval_local=False):
def filter_image_paths(image_paths):
    print("Identify {} files in the directory".format(len(image_paths)))
    new_image_paths = []
    new_images = []
    for p in image_paths:
        img = load_image(p)
        if img is None:
            print("{} is not an image file, skipped".format(p.split("/")[-1]))
            continue
        new_image_paths.append(p)
        new_images.append(img)
    print("Identify {} images in the directory".format(len(new_image_paths)))
    return new_image_paths, new_images


class Faces(object):
    def __init__(self, image_paths, loaded_images, aligner, verbose=1, eval_local=False):
        self.image_paths = image_paths
        self.verbose = verbose
        self.aligner = aligner
        self.org_faces = []
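Note (not part of the diff): a minimal sketch of the new call contract, assuming an aligner instance is already available. filter_image_paths returns parallel lists of paths and decoded images, and Faces now takes both, so it no longer re-reads files from disk:

paths = ["a.png", "notes.txt", "b.jpg"]                     # illustrative input
image_paths, loaded_images = filter_image_paths(paths)      # "notes.txt" is dropped (load_image returns None)
faces = Faces(image_paths, loaded_images, aligner, verbose=1)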
@@ -99,12 +117,9 @@ class Faces(object):
        self.cropped_faces_shape = []
        self.cropped_index = []
        self.callback_idx = []
        if verbose:
            print("Identify {} images".format(len(image_paths)))
        for i, p in enumerate(image_paths):
            cur_img = load_image(p)
            if cur_img is None:
                continue
        for i in range(0, len(loaded_images)):
            cur_img = loaded_images[i]
            p = image_paths[i]

            self.org_faces.append(cur_img)
@@ -115,7 +130,7 @@ class Faces(object):
            align_img = align(cur_img, self.aligner, margin=margin)

            if align_img is None:
                print("Find 0 face(s) in {}".format(p.split("/")[-1]))
                print("Find 0 face(s)".format(p.split("/")[-1]))
                continue

            cur_faces = align_img[0]
@@ -143,8 +158,7 @@ class Faces(object):
            self.callback_idx.extend([i] * len(cur_faces_square))

        if not self.cropped_faces:
            print("No faces detected")
            exit(1)
            raise Exception("No faces detected")

        self.cropped_faces = np.array(self.cropped_faces)
@@ -469,8 +483,11 @@ def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, m
    embs = np.array(embs)

    pair_dist = pairwise_l2_distance(original_feature_x, embs)
    pair_dist = np.array(pair_dist)

    max_sum = np.min(pair_dist, axis=0)
    max_id = np.argmax(max_sum)
    max_id_ls = np.argsort(max_sum)[::-1]
    max_id = random.choice(max_id_ls[:20])

    target_data_id = paths[int(max_id)]
    image_dir = os.path.join(model_dir, "target_data/{}".format(target_data_id))
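Note (not part of the diff): the selection change swaps a deterministic argmax for a random pick among the most dissimilar target candidates, so repeated runs no longer always cloak toward the same target. A self-contained sketch of the rule with made-up distances:

import random
import numpy as np

max_sum = np.array([0.3, 0.9, 0.1, 0.7])        # per-candidate dissimilarity scores (illustrative)
max_id_ls = np.argsort(max_sum)[::-1]           # indices sorted most-dissimilar first -> [1, 3, 0, 2]
max_id = random.choice(max_id_ls[:20])          # pick one of the (up to) 20 best at random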
@@ -480,9 +497,12 @@ def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, m
    for i in range(10):
        if os.path.exists(os.path.join(model_dir, "target_data/{}/{}.jpg".format(target_data_id, i))):
            continue
        get_file("{}.jpg".format(i),
                 "http://sandlab.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format(target_data_id, i),
                 cache_dir=model_dir, cache_subdir='target_data/{}/'.format(target_data_id))
        try:
            get_file("{}.jpg".format(i),
                     "http://sandlab.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format(target_data_id, i),
                     cache_dir=model_dir, cache_subdir='target_data/{}/'.format(target_data_id))
        except Exception:
            pass

    image_paths = glob.glob(image_dir + "/*.jpg")
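Note (not part of the diff): wrapping get_file in try/except means a failed download of one target image no longer aborts target selection; the later glob simply sees fewer candidate jpgs. A small sketch of the skip-on-failure pattern with a hypothetical fetch function:

def fetch_or_skip(fetch, url):
    # Returns True if the file was fetched, False if the attempt failed and was skipped.
    try:
        fetch(url)
        return True
    except Exception:
        return False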