Mirror of https://github.com/Shawn-Shan/fawkes.git (synced 2024-12-22 07:09:33 +05:30)

Commit 6fe3a7c3fd: add debug option and progbar
Parent: 5b013a01fc
fawkes/__init__.py
@@ -4,17 +4,18 @@
# @Link : https://www.shawnshan.com/

-__version__ = '0.0.8'
+__version__ = '0.0.9'

from .detect_faces import create_mtcnn, run_detect_face
from .differentiator import FawkesMaskGeneration
from .protection import main, Fawkes
-from .utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, get_file
+from .utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, get_file, \
+    filter_image_paths

__all__ = (
    '__version__', 'create_mtcnn', 'run_detect_face',
    'FawkesMaskGeneration', 'load_extractor',
    'init_gpu',
    'select_target_label', 'dump_image', 'reverse_process_cloaked',
-    'Faces', 'get_file', 'main', 'Fawkes'
+    'Faces', 'get_file', 'filter_image_paths', 'main', 'Fawkes'
)
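The version bump and the new export can be checked directly from the package; a minimal sketch (nothing here beyond what the hunk above adds):

import fawkes

print(fawkes.__version__)          # '0.0.9' after this commit

# filter_image_paths is now re-exported at package level alongside Faces etc.
from fawkes import filter_image_paths, Faces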
fawkes/differentiator.py
@@ -10,8 +10,8 @@ from decimal import Decimal

import numpy as np
import tensorflow as tf

from fawkes.utils import preprocess, reverse_preprocess
+from keras.utils import Progbar


class FawkesMaskGeneration:
@@ -226,8 +226,6 @@ class FawkesMaskGeneration:
        self.init = tf.variables_initializer(var_list=[self.modifier] + new_vars)

-        print('Attacker loaded')

    def preprocess_arctanh(self, imgs):

        imgs = reverse_preprocess(imgs, self.intensity_range)
@@ -276,7 +274,7 @@ class FawkesMaskGeneration:
            adv_imgs.extend(adv_img)

        elapsed_time = time.time() - start_time
-        print('attack cost %f s' % (elapsed_time))
+        print('protection cost %f s' % (elapsed_time))

        return np.array(adv_imgs)
@@ -356,7 +354,6 @@ class FawkesMaskGeneration:
                               bottlesim_sum / nb_imgs))

        finished_idx = set()
        try:
            total_distance = [0] * nb_imgs

            if self.limit_dist:
@@ -370,6 +367,11 @@
                        break
                    total_distance[e] = bottlesim

+            if self.verbose == 0:
+                progressbar = Progbar(
+                    self.MAX_ITERATIONS, width=30, verbose=1
+                )
+
            for iteration in range(self.MAX_ITERATIONS):

                self.sess.run([self.train], feed_dict={self.learning_rate_holder: LR})
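For context, Keras' Progbar (the class imported above) is driven manually: it is constructed with the total step count and advanced with update() as the loop runs. A standalone sketch of the API, with a sleep standing in for one optimization step:

import time

from keras.utils import Progbar  # tf.keras.utils.Progbar behaves the same way

MAX_ITERATIONS = 100  # stand-in for self.MAX_ITERATIONS

progressbar = Progbar(MAX_ITERATIONS, width=30, verbose=1)
for iteration in range(MAX_ITERATIONS):
    time.sleep(0.01)                   # placeholder for one optimization step
    progressbar.update(iteration + 1)  # redraws the bar in place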
@@ -407,7 +409,6 @@ class FawkesMaskGeneration:

                if iteration != 0 and iteration % (self.MAX_ITERATIONS // 2) == 0:
                    LR = LR * 0.8
                    print("Learning Rate: ", LR)

                if iteration % (self.MAX_ITERATIONS // 5) == 0:
                    if self.verbose == 1:
@@ -415,25 +416,8 @@ class FawkesMaskGeneration:
                        bottlesim_sum = self.sess.run(self.bottlesim_sum)
                        print('ITER %4d perturb: %.5f; sim: %f'
                              % (iteration, dist_raw_sum / nb_imgs, bottlesim_sum / nb_imgs))

-                # protected_images = aimg_input_list
-                #
-                # orginal_images = np.copy(self.faces.cropped_faces)
-                # cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(
-                #     orginal_images)
-                # final_images = self.faces.merge_faces(cloak_perturbation)
-                #
-                # for p_img, img in zip(protected_images, final_images):
-                #     dump_image(reverse_process_cloaked(p_img),
-                #                "/home/shansixioing/fawkes/data/emily/emily_cloaked_cropped{}.png".format(iteration),
-                #                format='png')
-                #
-                #     dump_image(img,
-                #                "/home/shansixioing/fawkes/data/emily/emily_cloaked_{}.png".format(iteration),
-                #                format='png')

        except KeyboardInterrupt:
            pass

+        if self.verbose == 0:
+            progressbar.update(iteration)

        if self.verbose == 1:
            loss_sum = float(self.sess.run(self.loss_sum))
@@ -445,7 +429,6 @@
                         dist_sum,
                         dist_raw_sum,
                         bottlesim_sum / nb_imgs))

        print("\n")
        best_adv = self.clipping(best_adv[:nb_imgs])

        return best_adv
fawkes/protection.py
@@ -17,15 +17,17 @@ logging.getLogger('tensorflow').disabled = True

import numpy as np
from fawkes.differentiator import FawkesMaskGeneration
from fawkes.utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, \
-    Faces
+    Faces, filter_image_paths

from fawkes.align_face import aligner
from fawkes.utils import get_file

random.seed(12243)
np.random.seed(122412)


def generate_cloak_images(sess, feature_extractors, image_X, target_emb=None, th=0.01, faces=None, sd=1e9, lr=2,
-                          max_step=500, batch_size=1):
+                          max_step=500, batch_size=1, debug=False):
    batch_size = batch_size if len(image_X) > batch_size else len(image_X)

    differentiator = FawkesMaskGeneration(sess, feature_extractors,
@@ -36,7 +38,7 @@ def generate_cloak_images(sess, feature_extractors, image_X, target_emb=None, th
                                          learning_rate=lr,
                                          max_iterations=max_step,
                                          l_threshold=th,
-                                          verbose=1, maximize=False, keep_final=False, image_shape=image_X.shape[1:],
+                                          verbose=1 if debug else 0, maximize=False, keep_final=False, image_shape=image_X.shape[1:],
                                          faces=faces)

    cloaked_image_X = differentiator.attack(image_X, target_emb)
@@ -55,13 +57,14 @@ def check_imgs(imgs):

class Fawkes(object):
    def __init__(self, feature_extractor, gpu, batch_size):
-        global graph
-        graph = tf.get_default_graph()

        self.feature_extractor = feature_extractor
        self.gpu = gpu
        self.batch_size = batch_size
-        self.sess = init_gpu(gpu)
+        global sess
+        sess = init_gpu(gpu)
+        global graph
+        graph = tf.get_default_graph()

        model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
        if not os.path.exists(os.path.join(model_dir, "mtcnn.p.gz")):
@@ -69,11 +72,11 @@ class Fawkes(object):
            get_file("mtcnn.p.gz", "http://sandlab.cs.uchicago.edu/fawkes/files/mtcnn.p.gz", cache_dir=model_dir,
                     cache_subdir='')

-        self.aligner = aligner(self.sess)
        self.fs_names = [feature_extractor]
        if isinstance(feature_extractor, list):
            self.fs_names = feature_extractor

+        self.aligner = aligner(sess)
        self.feature_extractors_ls = [load_extractor(name) for name in self.fs_names]

    def mode2param(self, mode):
@@ -101,22 +104,26 @@ class Fawkes(object):
        return th, max_step, lr

    def run_protection(self, image_paths, mode='mid', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png',
-                       separate_target=True):
+                       separate_target=True, debug=False):

        if mode == 'custom':
            pass
        else:
            th, max_step, lr = self.mode2param(mode)

+        image_paths, loaded_images = filter_image_paths(image_paths)
+
        start_time = time.time()

        if not image_paths:
            raise Exception("No images in the directory")
        with graph.as_default():
-            faces = Faces(image_paths, self.aligner, verbose=1)
+            faces = Faces(image_paths, loaded_images, self.aligner, verbose=1)

            original_images = faces.cropped_faces
            original_images = np.array(original_images)

+        with sess.as_default():
            if separate_target:
                target_embedding = []
                for org_img in original_images:
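A minimal sketch of driving the updated method from Python; the directory, GPU id, and the "high_extract" extractor name are illustrative values, not taken from this diff:

import glob

from fawkes.protection import Fawkes

protector = Fawkes(feature_extractor="high_extract", gpu="0", batch_size=1)

# debug=True keeps the old per-iteration console output; the default
# (debug=False) now shows the Keras progress bar instead.
protector.run_protection(glob.glob("imgs/*"), mode="mid", debug=True)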
@@ -127,22 +134,22 @@ class Fawkes(object):
            else:
                target_embedding = select_target_label(original_images, self.feature_extractors_ls, self.fs_names)

-            protected_images = generate_cloak_images(self.sess, self.feature_extractors_ls, original_images,
+            protected_images = generate_cloak_images(sess, self.feature_extractors_ls, original_images,
                                                      target_emb=target_embedding, th=th, faces=faces, sd=sd,
-                                                     lr=lr, max_step=max_step, batch_size=batch_size)
+                                                     lr=lr, max_step=max_step, batch_size=batch_size, debug=debug)

            faces.cloaked_cropped_faces = protected_images

-            cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(original_images)
+            cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(
+                original_images)
            final_images = faces.merge_faces(cloak_perturbation)

-            for p_img, cloaked_img, path in zip(final_images, protected_images, image_paths):
+            for p_img, path in zip(final_images, image_paths):
                file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, format)
                dump_image(p_img, file_name, format=format)

-            elapsed_time = time.time() - start_time
-            print('attack cost %f s' % elapsed_time)
+            # elapsed_time = time.time() - start_time
            print("Done!")
            return None


def main(*argv):
@@ -175,6 +182,7 @@ def main(*argv):

    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument('--separate_target', action='store_true')
+    parser.add_argument('--debug', action='store_true')

    parser.add_argument('--format', type=str,
                        help="final image format",
@@ -192,7 +200,7 @@ def main(*argv):
    protector = Fawkes(args.feature_extractor, args.gpu, args.batch_size)
    protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr, max_step=args.max_step,
                             batch_size=args.batch_size, format=args.format,
-                             separate_target=args.separate_target)
+                             separate_target=args.separate_target, debug=args.debug)


if __name__ == '__main__':
fawkes/utils.py
@@ -55,6 +55,7 @@ if sys.version_info[0] == 2:
else:
    from six.moves.urllib.request import urlretrieve


def clip_img(X, preprocessing='raw'):
    X = reverse_preprocess(X, preprocessing)
    X = np.clip(X, 0.0, 255.0)
@@ -67,6 +68,8 @@ def load_image(path):
        img = Image.open(path)
    except PIL.UnidentifiedImageError:
        return None
+    except IsADirectoryError:
+        return None

    if img._getexif() is not None:
        for orientation in ExifTags.TAGS.keys():
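For context, these are the two Pillow failure modes that load_image() now converts into a None return; a small standalone demonstration (the file and directory names are throwaway examples, and IsADirectoryError assumes a POSIX filesystem):

import os

import PIL
from PIL import Image

# Throwaway fixtures for the demo.
with open("not_an_image.txt", "w") as f:
    f.write("hello")
os.makedirs("a_directory", exist_ok=True)

for path in ["not_an_image.txt", "a_directory"]:
    try:
        Image.open(path)
    except (PIL.UnidentifiedImageError, IsADirectoryError) as err:
        print(path, "->", type(err).__name__)  # the cases load_image() now skips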
@@ -89,9 +92,24 @@ def load_image(path):
    return image_array


-class Faces(object):
-    def __init__(self, image_paths, aligner, verbose=1, eval_local=False):
+def filter_image_paths(image_paths):
+    print("Identify {} files in the directory".format(len(image_paths)))
+    new_image_paths = []
+    new_images = []
+    for p in image_paths:
+        img = load_image(p)
+        if img is None:
+            print("{} is not an image file, skipped".format(p.split("/")[-1]))
+            continue
+        new_image_paths.append(p)
+        new_images.append(img)
+    print("Identify {} images in the directory".format(len(new_image_paths)))
+    return new_image_paths, new_images
+
+
+class Faces(object):
+    def __init__(self, image_paths, loaded_images, aligner, verbose=1, eval_local=False):
        self.image_paths = image_paths
        self.verbose = verbose
        self.aligner = aligner
        self.org_faces = []
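A short sketch of how the new helper is meant to feed the updated Faces constructor (the glob pattern is illustrative; building the MTCNN aligner is omitted here):

import glob

from fawkes.utils import filter_image_paths

# Non-image entries (text files, sub-directories, ...) are dropped up front,
# and every surviving image is decoded exactly once.
image_paths, loaded_images = filter_image_paths(glob.glob("imgs/*"))

# The preloaded arrays are then handed to Faces alongside the paths:
# faces = Faces(image_paths, loaded_images, aligner, verbose=1)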
@@ -99,12 +117,9 @@ class Faces(object):
        self.cropped_faces_shape = []
        self.cropped_index = []
        self.callback_idx = []
-        if verbose:
-            print("Identify {} images".format(len(image_paths)))
-        for i, p in enumerate(image_paths):
-            cur_img = load_image(p)
-            if cur_img is None:
-                continue
+        for i in range(0, len(loaded_images)):
+            cur_img = loaded_images[i]
+            p = image_paths[i]

            self.org_faces.append(cur_img)
@@ -115,7 +130,7 @@ class Faces(object):
            align_img = align(cur_img, self.aligner, margin=margin)

            if align_img is None:
-                print("Find 0 face(s) in {}".format(p.split("/")[-1]))
+                print("Find 0 face(s)".format(p.split("/")[-1]))
                continue

            cur_faces = align_img[0]
@@ -143,8 +158,7 @@ class Faces(object):
            self.callback_idx.extend([i] * len(cur_faces_square))

        if not self.cropped_faces:
-            print("No faces detected")
-            exit(1)
+            raise Exception("No faces detected")

        self.cropped_faces = np.array(self.cropped_faces)
@@ -469,8 +483,11 @@ def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, m
    embs = np.array(embs)

    pair_dist = pairwise_l2_distance(original_feature_x, embs)
+    pair_dist = np.array(pair_dist)

    max_sum = np.min(pair_dist, axis=0)
-    max_id = np.argmax(max_sum)
+    max_id_ls = np.argsort(max_sum)[::-1]
+    max_id = random.choice(max_id_ls[:20])

    target_data_id = paths[int(max_id)]
    image_dir = os.path.join(model_dir, "target_data/{}".format(target_data_id))
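The target selection no longer always picks the single most dissimilar candidate: it now samples one of the 20 most dissimilar ones. A self-contained illustration of that selection step with made-up distances:

import random

import numpy as np

# Minimum pairwise distance from the user's images to each candidate target.
max_sum = np.array([0.8, 2.5, 1.9, 3.1, 0.4])  # made-up values

old_choice = np.argmax(max_sum)                # old: always index 3 here
max_id_ls = np.argsort(max_sum)[::-1]          # candidates, most distant first
new_choice = random.choice(max_id_ls[:20])     # new: any of the top 20
print(old_choice, new_choice)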
@@ -480,9 +497,12 @@
    for i in range(10):
        if os.path.exists(os.path.join(model_dir, "target_data/{}/{}.jpg".format(target_data_id, i))):
            continue
+        try:
            get_file("{}.jpg".format(i),
                     "http://sandlab.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format(target_data_id, i),
                     cache_dir=model_dir, cache_subdir='target_data/{}/'.format(target_data_id))
+        except Exception:
+            pass

    image_paths = glob.glob(image_dir + "/*.jpg")