2
0
mirror of https://github.com/Shawn-Shan/fawkes.git synced 2024-12-22 07:09:33 +05:30

fix utils

Former-commit-id: 366e3dc5c64ebf707640a2f969ca3bb867e9665f [formerly 9cef0f9c15d5956f586af5d9059a71b3a999824a]
Former-commit-id: 3ae67acd4d630ea937abc41ad125471f527ef617
This commit is contained in:
Shawn-Shan 2020-07-12 17:56:27 -05:00
parent 81a6fed188
commit a44dbe273f
2 changed files with 6 additions and 172 deletions

View File

@@ -8,7 +8,6 @@ import shutil
 import sys
 import tarfile
 import zipfile
 import six
 from six.moves.urllib.error import HTTPError, URLError
@@ -21,7 +20,7 @@ import keras.backend as K
 import numpy as np
 import tensorflow as tf
 from PIL import Image, ExifTags
-from keras.layers import Dense, Activation
+from keras.layers import Dense, Activation, Dropout
 from keras.models import Model
 from keras.preprocessing import image
 from skimage.transform import resize
@@ -63,7 +62,9 @@ def clip_img(X, preprocessing='raw'):
 def load_image(path):
     img = Image.open(path)
     if img._getexif() is not None:
         for orientation in ExifTags.TAGS.keys():
             if ExifTags.TAGS[orientation] == 'Orientation':
@@ -184,10 +185,12 @@ def fix_gpu_memory(mem_fraction=1):
     return sess
-def load_victim_model(number_classes, teacher_model=None, end2end=False):
+def load_victim_model(number_classes, teacher_model=None, end2end=False, dropout=0):
     for l in teacher_model.layers:
         l.trainable = end2end
     x = teacher_model.layers[-1].output
+    if dropout > 0:
+        x = Dropout(dropout)(x)
     x = Dense(number_classes)(x)
     x = Activation('softmax', name="act")(x)
     model = Model(teacher_model.input, x)

View File

@ -1,169 +0,0 @@
import argparse
import os
import sys
import numpy as np
sys.path.append("/home/shansixioing/fawkes/fawkes")
from utils import extract_faces, get_dataset_path, init_gpu, load_extractor, load_victim_model
import random
import glob
from keras.preprocessing import image
from keras.utils import to_categorical
from keras.applications.vgg16 import preprocess_input
def select_samples(data_dir):
    """Collect the path of every sample file under *data_dir*.

    Assumes a two-level layout ``data_dir/<class>/<files>`` and returns a
    flat list of full file paths (order follows ``os.listdir``).
    """
    return [
        os.path.join(data_dir, label, fname)
        for label in os.listdir(data_dir)
        for fname in os.listdir(os.path.join(data_dir, label))
    ]
def generator_wrap(protect_images, test=False, validation_split=0.1):
    """Infinite batch generator mixing tracker-class images with the protected class.

    Builds a label map from the first ``args.num_classes`` directories under
    the dataset's train folder, plus one extra label ("protected") for the
    protected identity.

    Args:
        protect_images: images of the protected person; one is sampled at
            random whenever the "protected" class is drawn.
            # assumes these are already preprocessed — TODO confirm against caller
        test: when True the protected class is skipped entirely, so batches
            contain only real tracker classes (and may be smaller than
            ``args.batch_size``).
        validation_split: accepted but unused inside this function.

    Yields:
        ``(batch_X, batch_Y)`` with ``batch_Y`` one-hot over
        ``args.num_classes + 1`` labels.
    """
    # NOTE(review): relies on the module-level `args` namespace set in __main__.
    train_data_dir, test_data_dir, num_classes, num_images = get_dataset_path(args.dataset)
    idx = 0
    path2class = {}      # class directory path -> integer label
    path2imgs_list = {}  # class directory path -> list of image file paths
    for target_path in sorted(glob.glob(train_data_dir + "/*")):
        path2class[target_path] = idx
        path2imgs_list[target_path] = glob.glob(os.path.join(target_path, "*"))
        idx += 1
        if idx >= args.num_classes:
            break
    # The protected person gets the last label index (== args.num_classes).
    path2class["protected"] = idx
    np.random.seed(12345)  # fixed seed: class sampling is reproducible across runs
    while True:
        batch_X = []
        batch_Y = []
        # Sample class keys with replacement to form one batch.
        cur_batch_path = np.random.choice(list(path2class.keys()), args.batch_size)
        for p in cur_batch_path:
            cur_y = path2class[p]
            if test and p == 'protected':
                continue
            # protect class images in train dataset
            elif p == 'protected':
                cur_x = random.choice(protect_images)
            else:
                cur_path = random.choice(path2imgs_list[p])
                im = image.load_img(cur_path, target_size=(224, 224))
                cur_x = image.img_to_array(im)
                cur_x = preprocess_input(cur_x)
            batch_X.append(cur_x)
            batch_Y.append(cur_y)
        batch_X = np.array(batch_X)
        batch_Y = to_categorical(np.array(batch_Y), num_classes=args.num_classes + 1)
        yield batch_X, batch_Y
def eval_uncloaked_test_data(cloak_data, n_classes):
    """Return the uncloaked test images with one-hot labels of the original class.

    The label index is looked up from the first path of the protected class.
    """
    first_protect_path = next(iter(cloak_data.protect_class_path))
    label = cloak_data.path2idx[first_protect_path]
    test_images = cloak_data.protect_test_X
    labels_onehot = to_categorical([label for _ in range(len(test_images))], n_classes)
    return test_images, labels_onehot
def eval_cloaked_test_data(cloak_data, n_classes, validation_split=0.1):
    """Return the held-out tail of the cloaked train set with one-hot original labels.

    The last ``validation_split`` fraction of ``cloaked_protect_train_X`` is
    used as the cloaked test set.
    """
    cloaked_train = cloak_data.cloaked_protect_train_X
    cut = int(len(cloaked_train) * (1 - validation_split))
    held_out = cloaked_train[cut:]
    label = cloak_data.path2idx[next(iter(cloak_data.protect_class_path))]
    labels_onehot = to_categorical([label] * len(held_out), n_classes)
    return held_out, labels_onehot
def main():
    """Train a tracker ("attacker") model on cloaked images and report its accuracy.

    Reads image files from ``args.directory``: files whose basename contains
    ``"_cloaked"`` are the protected (cloaked) images used for training; the
    rest are the original images, used only for evaluation. Relies on the
    module-level ``args`` namespace set in ``__main__``.
    """
    init_gpu(args.gpu)
    #
    # if args.dataset == 'pubfig':
    #     N_CLASSES = 65
    #     CLOAK_DIR = args.cloak_data
    # elif args.dataset == 'scrub':
    #     N_CLASSES = 530
    #     CLOAK_DIR = args.cloak_data
    # else:
    #     raise ValueError
    print("Build attacker's model")
    image_paths = glob.glob(os.path.join(args.directory, "*"))
    # Split on the "_cloaked" filename marker.
    # NOTE(review): splitting on "/" assumes POSIX paths — breaks on Windows.
    original_image_paths = sorted([path for path in image_paths if "_cloaked" not in path.split("/")[-1]])
    protect_image_paths = sorted([path for path in image_paths if "_cloaked" in path.split("/")[-1]])
    # Original (uncloaked) images, capped at 150, all labeled with the extra
    # protected-class index (args.num_classes) for evaluation.
    original_imgs = np.array([extract_faces(image.img_to_array(image.load_img(cur_path))) for cur_path in
                              original_image_paths[:150]])
    original_y = to_categorical([args.num_classes] * len(original_imgs), num_classes=args.num_classes + 1)
    protect_imgs = [extract_faces(image.img_to_array(image.load_img(cur_path))) for cur_path in
                    protect_image_paths]
    # Training batches include the cloaked images; test batches exclude them.
    train_generator = generator_wrap(protect_imgs,
                                     validation_split=args.validation_split)
    test_generator = generator_wrap(protect_imgs, test=True,
                                    validation_split=args.validation_split)
    base_model = load_extractor(args.transfer_model)
    model = load_victim_model(teacher_model=base_model, number_classes=args.num_classes + 1)
    # cloaked_test_X, cloaked_test_Y = eval_cloaked_test_data(cloak_data, args.num_classes,
    #                                                         validation_split=args.validation_split)
    # try:
    train_data_dir, test_data_dir, num_classes, num_images = get_dataset_path(args.dataset)
    model.fit_generator(train_generator, steps_per_epoch=num_images // 32,
                        validation_data=(original_imgs, original_y),
                        epochs=args.n_epochs,
                        verbose=1,
                        use_multiprocessing=True, workers=5)
    # except KeyboardInterrupt:
    #     pass
    # Accuracy on the protected person's uncloaked images: low accuracy here
    # means the cloak worked (the tracker fails to recognize the real face).
    _, acc_original = model.evaluate(original_imgs, original_y, verbose=0)
    print("Accuracy on uncloaked/original images TEST: {:.4f}".format(acc_original))
    # EVAL_RES['acc_original'] = acc_original
    _, other_acc = model.evaluate_generator(test_generator, verbose=0, steps=50)
    print("Accuracy on other classes {:.4f}".format(other_acc))
    # EVAL_RES['other_acc'] = other_acc
    # dump_dictionary_as_json(EVAL_RES, os.path.join(CLOAK_DIR, "eval_seed{}.json".format(args.seed_idx)))
def parse_arguments(argv):
    """Parse command-line options for the cloak-evaluation script.

    Args:
        argv: list of argument strings, e.g. ``sys.argv[1:]``.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=str,
                        help='GPU id', default='0')
    parser.add_argument('--dataset', type=str,
                        help='name of dataset', default='scrub')
    # Fixed copy-pasted help text: this option is the class count, not the dataset name.
    parser.add_argument('--num_classes', type=int,
                        help='number of tracker-side classes', default=520)
    parser.add_argument('--directory', '-d', type=str,
                        help='name of the cloak result directory',
                        default='img/')
    parser.add_argument('--transfer_model', type=str,
                        help='the feature extractor used for tracker model training. ', default='low_extract')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--validation_split', type=float, default=0.1)
    parser.add_argument('--n_epochs', type=int, default=3)
    return parser.parse_args(argv)
if __name__ == '__main__':
    # `args` is module-level on purpose: generator_wrap() and main() read it directly.
    args = parse_arguments(sys.argv[1:])
    main()