mirror of https://github.com/Shawn-Shan/fawkes.git

add option to bypass face detection step
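
Threads a new no_align option from the command line (--no-align) through Fawkes.run_protection and into the Faces helper. With the option set, Faces skips the align() call and treats each loaded image as a single face, the per-image "Find N face(s)" message is suppressed, and merge_faces returns the protected images clipped to the 0-255 pixel range instead of pasting cloaked face crops back into the originals. The default --sd penalty for custom mode also changes from 1e6 to 1e9.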

Shawn-Shan 2020-08-01 00:30:40 -05:00
parent 641e020e09
commit 8ceeaf54b0
2 changed files with 33 additions and 18 deletions

fawkes/protection.py

@@ -84,7 +84,7 @@ class Fawkes(object):
         return th, max_step, lr
 
     def run_protection(self, image_paths, mode='min', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png',
-                       separate_target=True, debug=False):
+                       separate_target=True, debug=False, no_align=False):
         if mode == 'custom':
             pass
         else:
@@ -100,9 +100,9 @@ class Fawkes(object):
                 return 3
 
         with graph.as_default():
-            faces = Faces(image_paths, loaded_images, self.aligner, verbose=1)
+            faces = Faces(image_paths, loaded_images, self.aligner, verbose=1, no_align=no_align)
             original_images = faces.cropped_faces
 
             if len(original_images) == 0:
                 print("No face detected. ")
                 return 2
@@ -184,12 +184,14 @@ def main(*argv):
     parser.add_argument('--max-step', help='only relevant with mode=custom, number of steps for optimization', type=int,
                         default=1000)
     parser.add_argument('--sd', type=int, help='only relevant with mode=custom, penalty number, read more in the paper',
-                        default=1e6)
+                        default=1e9)
     parser.add_argument('--lr', type=float, help='only relevant with mode=custom, learning rate', default=2)
 
     parser.add_argument('--batch-size', help="number of images to run optimization together", type=int, default=1)
     parser.add_argument('--separate_target', help="whether select separate targets for each faces in the directory",
                         action='store_true')
+    parser.add_argument('--no-align', help="whether to detect and crop faces",
+                        action='store_true')
     parser.add_argument('--debug', help="turn on debug and copy/paste the stdout when reporting an issue on github",
                         action='store_true')
     parser.add_argument('--format', type=str,
@@ -209,7 +211,7 @@ def main(*argv):
     protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr,
                              max_step=args.max_step,
                              batch_size=args.batch_size, format=args.format,
-                             separate_target=args.separate_target, debug=args.debug)
+                             separate_target=args.separate_target, debug=args.debug, no_align=args.no_align)
 
 
 if __name__ == '__main__':

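For context, a minimal usage sketch of the new option through the Python API, assuming the package layout fawkes/protection.py; the Fawkes constructor arguments (feature extractor name, GPU id, batch size) and the example image path are assumptions beyond what this diff shows. On the command line, the equivalent is the new --no-align flag.

# Usage sketch (not part of this commit): cloak an image without the face
# detection / alignment step. The constructor arguments below are assumed.
from fawkes.protection import Fawkes

protector = Fawkes("high_extract", "0", 1)  # feature extractor, GPU id, batch size (assumed)

# no_align=True makes Faces treat the whole image as the face, so nothing is
# cropped and merge_faces only clips the result back to the valid pixel range.
protector.run_protection(["imgs/portrait.png"],
                         mode='min',
                         th=0.04,
                         format='png',
                         separate_target=False,
                         debug=False,
                         no_align=True)
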
fawkes/utils.py

@@ -120,9 +120,11 @@ def filter_image_paths(image_paths):
 
 
 class Faces(object):
-    def __init__(self, image_paths, loaded_images, aligner, verbose=1, eval_local=False, preprocessing=True):
+    def __init__(self, image_paths, loaded_images, aligner, verbose=1, eval_local=False, preprocessing=True,
+                 no_align=False):
         self.image_paths = image_paths
         self.verbose = verbose
+        self.no_align = no_align
         self.aligner = aligner
         self.org_faces = []
         self.cropped_faces = []
@@ -132,25 +134,27 @@ class Faces(object):
         for i in range(0, len(loaded_images)):
             cur_img = loaded_images[i]
             p = image_paths[i]
             self.org_faces.append(cur_img)
 
             if eval_local:
                 margin = 0
             else:
                 margin = 0.7
 
-            align_img = align(cur_img, self.aligner, margin=margin)
+            if not no_align:
+                align_img = align(cur_img, self.aligner, margin=margin)
                 if align_img is None:
                     print("Find 0 face(s)".format(p.split("/")[-1]))
                     continue
 
                 cur_faces = align_img[0]
+            else:
+                cur_faces = [cur_img]
 
             cur_shapes = [f.shape[:-1] for f in cur_faces]
             cur_faces_square = []
-            if verbose:
+            if verbose and not no_align:
                 print("Find {} face(s) in {}".format(len(cur_faces), p.split("/")[-1]))
 
             if eval_local:
                 cur_faces = cur_faces[:1]
@ -161,13 +165,19 @@ class Faces(object):
else:
long_size = max([img.shape[1], img.shape[0]])
base = np.zeros((long_size, long_size, 3))
# import pdb
# pdb.set_trace()
base[0:img.shape[0], 0:img.shape[1], :] = img
cur_faces_square.append(base)
cur_index = align_img[1]
cur_faces_square = [resize(f, (224, 224)) for f in cur_faces_square]
self.cropped_faces.extend(cur_faces_square)
if not self.no_align:
cur_index = align_img[1]
self.cropped_faces_shape.extend(cur_shapes)
self.cropped_faces.extend(cur_faces_square)
self.cropped_index.extend(cur_index)
self.callback_idx.extend([i] * len(cur_faces_square))
@@ -186,6 +196,8 @@ class Faces(object):
         return self.cropped_faces
 
     def merge_faces(self, protected_images, original_images):
+        if self.no_align:
+            return np.clip(protected_images, 0.0, 255.0)
         self.cloaked_faces = np.copy(self.org_faces)
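
When no_align is set there are no face crops to paste back into the original photo, so the merge step above reduces to clamping the perturbed whole-image arrays to the valid pixel range. A minimal sketch of that behaviour, with toy values chosen only for illustration:

import numpy as np

# A "protected" image whose perturbation pushed a few pixels out of range.
protected_images = np.array([[[256.4, -1.2, 128.0],
                              [10.0, 300.0, 0.5]]])

# The same clamp merge_faces applies when self.no_align is True.
merged = np.clip(protected_images, 0.0, 255.0)
print(merged.min(), merged.max())  # 0.0 255.0
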
@@ -520,6 +532,7 @@ def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, m
 https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/keras/utils/data_utils.py#L168-L297
 """
 
 def get_file(fname,
              origin,
              untar=False,