Mirror of https://github.com/Shawn-Shan/fawkes.git

commit 972e5e1a8a (parent 5e25900372)
prepare for 0.3 release
fawkes/__init__.py
@@ -1,3 +1,9 @@
+# -*- coding: utf-8 -*-
+# @Date    : 2020-07-01
+# @Author  : Shawn Shan (shansixiong@cs.uchicago.edu)
+# @Link    : https://www.shawnshan.com/
+
+
 from .protection import main
 
 if __name__ == '__main__':
fawkes/align_face.py
@@ -1,3 +1,31 @@
+"""Performs face alignment and stores face thumbnails in the output directory."""
+# MIT License
+#
+# Copyright (c) 2016 David Sandberg
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+""" Tensorflow implementation of the face detection / alignment algorithm found at
+https://github.com/kpzhang93/MTCNN_face_detection_alignment
+"""
+
+
 import numpy as np
 from fawkes import create_mtcnn, run_detect_face
 
@@ -19,8 +47,8 @@ def aligner(sess):
 
 def align(orig_img, aligner, margin=0.8, detect_multiple_faces=True):
     pnet, rnet, onet = aligner
-    minsize = 20  # minimum size of face
-    threshold = [0.6, 0.7, 0.7]  # three steps's threshold
+    minsize = 25  # minimum size of face
+    threshold = [0.85, 0.85, 0.85]  # three steps's threshold
     factor = 0.709  # scale factor
 
     if orig_img.ndim < 2:
fawkes/detect_face.py
@@ -1,3 +1,26 @@
+"""Performs face alignment and stores face thumbnails in the output directory."""
+# MIT License
+#
+# Copyright (c) 2016 David Sandberg
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
 """ Tensorflow implementation of the face detection / alignment algorithm found at
 https://github.com/kpzhang93/MTCNN_face_detection_alignment
 """
@@ -9,7 +32,6 @@ import pickle
 import numpy as np
 import tensorflow as tf
 from six import string_types, iteritems
-from skimage.transform import resize
 
 
 def layer(op):
fawkes/differentiator.py
@@ -161,7 +161,13 @@ class FawkesMaskGeneration:
 
         def calculate_direction(bottleneck_model, cur_timg_input, cur_simg_input):
             target_features = bottleneck_model(cur_timg_input)
-            return target_features
+            # return target_features
+            target_center = tf.reduce_mean(target_features, axis=0)
+            original = bottleneck_model(cur_simg_input)
+            original_center = tf.reduce_mean(original, axis=0)
+            direction = target_center - original_center
+            final_target = original + 2.0 * direction
+            return final_target
 
         self.bottlesim = 0.0
         self.bottlesim_sum = 0.0
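Not part of the commit: a minimal NumPy sketch of what the reworked calculate_direction now computes. Instead of returning the raw target features, every source feature vector is pushed past the target cluster center by twice the center-to-center offset; the shapes below are hypothetical.

import numpy as np

# hypothetical bottleneck outputs: 8 target images, 4 source images, 128-d features
target_features = np.random.rand(8, 128)
source_features = np.random.rand(4, 128)

target_center = target_features.mean(axis=0)       # mean target embedding
source_center = source_features.mean(axis=0)       # mean source embedding
direction = target_center - source_center          # offset between the two centers
final_target = source_features + 2.0 * direction   # per-source optimization targets, shape (4, 128)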
@@ -280,14 +286,9 @@ class FawkesMaskGeneration:
 
    def attack_batch(self, source_imgs, target_imgs, weights):
-
-        """
-        Run the attack on a batch of images and labels.
-        """
 
        LR = self.learning_rate
        nb_imgs = source_imgs.shape[0]
        mask = [True] * nb_imgs + [False] * (self.batch_size - nb_imgs)
-        # mask = [True] * self.batch_size
        mask = np.array(mask, dtype=np.bool)
 
        source_imgs = np.array(source_imgs)
@@ -324,16 +325,6 @@ class FawkesMaskGeneration:
                           self.assign_mask: mask,
                           self.assign_weights: weights_batch,
                           self.assign_modifier: modifier_batch})
-        else:
-            # if directly mimicking a vector, use assign_bottleneck_t_raw
-            # in setup
-            self.sess.run(self.setup,
-                          {self.assign_bottleneck_t_raw: timg_tanh_batch,
-                           self.assign_simg_tanh: simg_tanh_batch,
-                           self.assign_const: CONST,
-                           self.assign_mask: mask,
-                           self.assign_weights: weights_batch,
-                           self.assign_modifier: modifier_batch})
 
        best_bottlesim = [0] * nb_imgs if self.maximize else [np.inf] * nb_imgs
        best_adv = np.zeros_like(source_imgs)
@@ -394,12 +385,6 @@ class FawkesMaskGeneration:
                        best_bottlesim[e] = bottlesim
                        best_adv[e] = aimg_input
 
-                    # if iteration > 20 and (dist_raw >= self.l_threshold or iteration == self.MAX_ITERATIONS - 1):
-                    #     finished_idx.add(e)
-                    #     print("{} finished at dist {}".format(e, dist_raw))
-                    #     best_bottlesim[e] = bottlesim
-                    #     best_adv[e] = aimg_input
-                    #
                    all_clear = False
 
            if all_clear:
fawkes/master.py (new file, 66 lines)
@@ -0,0 +1,66 @@
+import socket
+import subprocess
+import sys
+import time
+
+print(socket.gethostname())
+
+
+def assign_gpu(args, gpu_idx):
+    for i, arg in enumerate(args):
+        if arg == "GPUID":
+            args[i] = str(gpu_idx)
+    return args
+
+
+def produce_present():
+    process_ls = []
+    gpu_ls = list(sys.argv[1])
+    max_num = int(sys.argv[2])
+
+    available_gpus = []
+    i = 0
+    while len(available_gpus) < max_num:
+        if i > len(gpu_ls) - 1:
+            i = 0
+        available_gpus.append(gpu_ls[i])
+        i += 1
+
+    process_dict = {}
+    all_queries_to_run = []
+
+    for m in ['mid', 'low', 'min']:
+        for directory in ['KimKardashian', 'Liuyifei', 'Obama', 'TaylorSwift', 'TomHolland']:
+            args = ['python3', 'protection.py', '--gpu', 'GPUID', '-d',
+                    '/home/shansixioing/fawkes/data/test/{}/'.format(directory),
+                    '--batch-size', '30', '-m', m,
+                    '--debug']
+            args = [str(x) for x in args]
+            all_queries_to_run.append(args)
+
+    for args in all_queries_to_run:
+        cur_gpu = available_gpus.pop(0)
+        args = assign_gpu(args, cur_gpu)
+        print(" ".join(args))
+        p = subprocess.Popen(args)
+        process_ls.append(p)
+        process_dict[p] = cur_gpu
+
+        gpu_ls.append(cur_gpu)
+        time.sleep(5)
+        while not available_gpus:
+            for p in process_ls:
+                poll = p.poll()
+                if poll is not None:
+                    process_ls.remove(p)
+                    available_gpus.append(process_dict[p])
+
+            time.sleep(20)
+
+
+def main():
+    produce_present()
+
+
+if __name__ == '__main__':
+    main()
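Usage note (inferred from the argument handling above, not documented in the commit): master.py reads each character of its first argument as a GPU id and its second argument as the maximum number of concurrent jobs, so e.g. "python3 master.py 012 3" schedules the batch of protection.py test runs three at a time across GPUs 0, 1 and 2.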
fawkes/protection.py
@@ -1,6 +1,8 @@
-# from __future__ import absolute_import
-# from __future__ import division
-# from __future__ import print_function
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Date    : 2020-05-17
+# @Author  : Shawn Shan (shansixiong@cs.uchicago.edu)
+# @Link    : https://www.shawnshan.com/
 
 import argparse
 import glob
@@ -54,14 +56,18 @@ class Fawkes(object):
         self.protector_param = None
 
     def mode2param(self, mode):
-        if mode == 'low':
-            th = 0.0025
-            max_step = 30
-            lr = 30
+        if mode == 'min':
+            th = 0.002
+            max_step = 20
+            lr = 40
+        elif mode == 'low':
+            th = 0.003
+            max_step = 50
+            lr = 35
         elif mode == 'mid':
             th = 0.005
-            max_step = 100
-            lr = 15
+            max_step = 200
+            lr = 20
         elif mode == 'high':
             th = 0.008
             max_step = 500
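For reference, a sketch of the new per-mode presets as they read in this hunk (illustrative only: the MODE_PARAMS name is not in the codebase, and the updated learning rate for 'high' falls outside the lines shown):

# mode -> (DSSIM threshold th, max optimization steps, learning rate)
MODE_PARAMS = {
    'min': (0.002, 20, 40),
    'low': (0.003, 50, 35),
    'mid': (0.005, 200, 20),
    # 'high' keeps th=0.008 and max_step=500; its lr is not visible in this hunk
}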
@@ -77,7 +83,7 @@ class Fawkes(object):
             raise Exception("mode must be one of 'low', 'mid', 'high', 'ultra', 'custom'")
         return th, max_step, lr
 
-    def run_protection(self, image_paths, mode='low', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png',
+    def run_protection(self, image_paths, mode='min', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png',
                        separate_target=True, debug=False):
         if mode == 'custom':
             pass
@@ -137,10 +143,6 @@ class Fawkes(object):
 
         faces.cloaked_cropped_faces = protected_images
 
-        # cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(
-        #     original_images)
-        # final_images = faces.merge_faces(cloak_perturbation)
-
         final_images = faces.merge_faces(reverse_process_cloaked(protected_images),
                                          reverse_process_cloaked(original_images))
 
@@ -164,28 +166,34 @@ def main(*argv):
 
     parser = argparse.ArgumentParser()
     parser.add_argument('--directory', '-d', type=str,
-                        help='directory that contain images for cloaking', default='imgs/')
+                        help='the directory that contains images to run protection', default='imgs/')
 
     parser.add_argument('--gpu', '-g', type=str,
-                        help='GPU id', default='0')
+                        help='the GPU id when using GPU for optimization', default='0')
 
     parser.add_argument('--mode', '-m', type=str,
-                        help='cloak generation mode', default='low')
+                        help='cloak generation mode, select from min, low, mid, high. The higher the mode is, the more perturbation added and stronger protection',
+                        default='min')
 
     parser.add_argument('--feature-extractor', type=str,
-                        help="name of the feature extractor used for optimization",
+                        help="name of the feature extractor used for optimization, currently only support high_extract",
                         default="high_extract")
 
-    parser.add_argument('--th', type=float, default=0.01)
-    parser.add_argument('--max-step', type=int, default=1000)
-    parser.add_argument('--sd', type=int, default=1e9)
-    parser.add_argument('--lr', type=float, default=2)
-    parser.add_argument('--batch-size', type=int, default=1)
-    parser.add_argument('--separate_target', action='store_true')
-    parser.add_argument('--debug', action='store_true')
-
+    parser.add_argument('--th', help='only relevant with mode=custom, DSSIM threshold for perturbation', type=float,
+                        default=0.01)
+    parser.add_argument('--max-step', help='only relevant with mode=custom, number of steps for optimization', type=int,
+                        default=1000)
+    parser.add_argument('--sd', type=int, help='only relevant with mode=custom, penalty number, read more in the paper',
+                        default=1e6)
+    parser.add_argument('--lr', type=float, help='only relevant with mode=custom, learning rate', default=2)
+
+    parser.add_argument('--batch-size', help="number of images to run optimization together", type=int, default=1)
+    parser.add_argument('--separate_target', help="whether select separate targets for each faces in the directory",
+                        action='store_true')
+    parser.add_argument('--debug', help="turn on debug and copy/paste the stdout when reporting an issue on github",
+                        action='store_true')
     parser.add_argument('--format', type=str,
-                        help="final image format",
+                        help="format of the output image",
                         default="png")
 
     args = parser.parse_args(argv[1:])
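A short usage sketch against the updated interface (illustrative, not from the commit): because main(*argv) hands argv[1:] to argparse, the protection entry point can be driven from Python as well as from the shell; the first element only stands in for the program name.

from fawkes.protection import main

# equivalent to: python3 protection.py -d imgs/ --mode min --format png
main("protection", "-d", "imgs/", "--mode", "min", "--format", "png")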
@@ -198,17 +206,10 @@ def main(*argv):
     image_paths = [path for path in image_paths if "_cloaked" not in path.split("/")[-1]]
 
     protector = Fawkes(args.feature_extractor, args.gpu, args.batch_size)
-    if args.mode != 'all':
-        protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr,
-                                 max_step=args.max_step,
-                                 batch_size=args.batch_size, format=args.format,
-                                 separate_target=args.separate_target, debug=args.debug)
-    else:
-        for m in ['low', 'mid', 'high']:
-            protector.run_protection(image_paths, mode=m, th=args.th, sd=args.sd, lr=args.lr,
-                                     max_step=args.max_step,
-                                     batch_size=args.batch_size, format=args.format,
-                                     separate_target=args.separate_target, debug=args.debug)
+    protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr,
+                             max_step=args.max_step,
+                             batch_size=args.batch_size, format=args.format,
+                             separate_target=args.separate_target, debug=args.debug)
 
 
 if __name__ == '__main__':
fawkes/utils.py
@@ -1,3 +1,10 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# @Date    : 2020-05-17
+# @Author  : Shawn Shan (shansixiong@cs.uchicago.edu)
+# @Link    : https://www.shawnshan.com/
+
+
 import errno
 import glob
 import gzip
@@ -23,10 +30,9 @@ import keras.backend as K
 import numpy as np
 import tensorflow as tf
 from PIL import Image, ExifTags
-from keras.layers import Dense, Activation, Dropout
+from keras.layers import Dense, Activation
 from keras.models import Model
 from keras.preprocessing import image
-# from skimage.transform import resize
 
 from fawkes.align_face import align
 from six.moves.urllib.request import urlopen
@@ -72,7 +78,12 @@ def load_image(path):
     except IsADirectoryError:
         return None
 
-    if img._getexif() is not None:
+    try:
+        info = img._getexif()
+    except OSError:
+        return None
+
+    if info is not None:
         for orientation in ExifTags.TAGS.keys():
             if ExifTags.TAGS[orientation] == 'Orientation':
                 break
@@ -109,7 +120,7 @@ def filter_image_paths(image_paths):
 
 
 class Faces(object):
-    def __init__(self, image_paths, loaded_images, aligner, verbose=1, eval_local=False):
+    def __init__(self, image_paths, loaded_images, aligner, verbose=1, eval_local=False, preprocessing=True):
         self.image_paths = image_paths
         self.verbose = verbose
         self.aligner = aligner
@@ -165,6 +176,7 @@ class Faces(object):
 
         self.cropped_faces = np.array(self.cropped_faces)
 
-        self.cropped_faces = preprocess(self.cropped_faces, 'imagenet')
+        if preprocessing:
+            self.cropped_faces = preprocess(self.cropped_faces, 'imagenet')
 
         self.cloaked_cropped_faces = None
@@ -178,14 +190,12 @@ class Faces(object):
         self.cloaked_faces = np.copy(self.org_faces)
 
         for i in range(len(self.cropped_faces)):
-            # cur_cloak = cloaks[i]
             cur_protected = protected_images[i]
             cur_original = original_images[i]
 
             org_shape = self.cropped_faces_shape[i]
             old_square_shape = max([org_shape[0], org_shape[1]])
 
-            # reshape_cloak = resize(cur_cloak, (old_square_shape, old_square_shape))
             cur_protected = resize(cur_protected, (old_square_shape, old_square_shape))
             cur_original = resize(cur_original, (old_square_shape, old_square_shape))
 
@@ -197,6 +207,8 @@ class Faces(object):
             bb = self.cropped_index[i]
             self.cloaked_faces[callback_id][bb[1]:bb[3], bb[0]:bb[2], :] += reshape_cloak
 
+        for i in range(0, len(self.cloaked_faces)):
+            self.cloaked_faces[i] = np.clip(self.cloaked_faces[i], 0.0, 255.0)
         return self.cloaked_faces
 
 
@@ -206,12 +218,11 @@ def dump_dictionary_as_json(dict, outfile):
     f.write(j.encode())
 
 
-def load_victim_model(number_classes, teacher_model=None, end2end=False, dropout=0):
+def load_victim_model(number_classes, teacher_model=None, end2end=False):
     for l in teacher_model.layers:
         l.trainable = end2end
     x = teacher_model.layers[-1].output
-    if dropout > 0:
-        x = Dropout(dropout)(x)
     x = Dense(number_classes)(x)
     x = Activation('softmax', name="act")(x)
     model = Model(teacher_model.input, x)
@@ -412,29 +423,12 @@ def get_dataset_path(dataset):
         'num_images']
 
 
-def normalize(x):
-    return x / np.linalg.norm(x, axis=1, keepdims=True)
-
-
 def dump_image(x, filename, format="png", scale=False):
-    # img = image.array_to_img(x, scale=scale)
-    img = image.array_to_img(x)
+    img = image.array_to_img(x, scale=scale)
     img.save(filename, format)
     return
 
 
-def load_dir(path):
-    assert os.path.exists(path)
-    x_ls = []
-    for file in os.listdir(path):
-        cur_path = os.path.join(path, file)
-        im = image.load_img(cur_path, target_size=(224, 224))
-        im = image.img_to_array(im)
-        x_ls.append(im)
-    raw_x = np.array(x_ls)
-    return preprocess(raw_x, 'imagenet')
-
-
 def load_embeddings(feature_extractors_names):
     model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
     dictionaries = []
@@ -457,7 +451,6 @@ def extractor_ls_predict(feature_extractors_ls, X):
         cur_features = extractor.predict(X)
         feature_ls.append(cur_features)
     concated_feature_ls = np.concatenate(feature_ls, axis=1)
-    concated_feature_ls = normalize(concated_feature_ls)
     return concated_feature_ls
 
 
@@ -477,20 +470,6 @@ def pairwise_l2_distance(A, B):
     return ED
 
 
-def calculate_dist_score(a, b, feature_extractors_ls, metric='l2'):
-    features1 = extractor_ls_predict(feature_extractors_ls, a)
-    features2 = extractor_ls_predict(feature_extractors_ls, b)
-
-    pair_cos = pairwise_l2_distance(features1, features2)
-    max_sum = np.min(pair_cos, axis=0)
-    max_sum_arg = np.argsort(max_sum)[::-1]
-    max_sum_arg = max_sum_arg[:len(a)]
-    max_sum = [max_sum[i] for i in max_sum_arg]
-    paired_target_X = [b[j] for j in max_sum_arg]
-    paired_target_X = np.array(paired_target_X)
-    return np.min(max_sum), paired_target_X
-
-
 def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, metric='l2'):
     model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
 