mirror of https://github.com/Shawn-Shan/fawkes.git
synced 2024-12-22 07:09:33 +05:30
update readme
This commit is contained in:
parent 83bb798373
commit 5b15437b69
README.md (48 lines changed)

# Fawkes

Code implementation of the paper "[Fawkes: Protecting Personal Privacy against Unauthorized Deep Learning Models](https://arxiv.org/pdf/2002.08327.pdf)", at *USENIX Security 2020*.

### BEFORE YOU RUN OUR CODE

We appreciate your interest in our work and thank you for trying out our code. We have noticed several cases where an incorrect configuration leads to poor protection performance. If you observe protection performance far below what we present in the paper, please feel free to open an issue in this repo or contact any of the authors directly. We are more than happy to help you debug your experiment and find the correct configuration.

### ABOUT

This repository contains the code implementation of the paper "[Fawkes: Protecting Personal Privacy against Unauthorized Deep Learning Models](https://arxiv.org/pdf/2002.08327.pdf)", at *USENIX Security 2020*.

### DEPENDENCIES

Our code is implemented and tested on Keras with the TensorFlow backend. The following packages are used by our code:

- `keras==2.3.1`
- `numpy==1.18.4`
- `tensorflow-gpu==1.13.1`

Our code is tested on `Python 3.6.8`.
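
As a quick sanity check, you can confirm that the pinned versions above are what is actually installed in your environment. This is a minimal sketch for convenience only; it is not part of the repository.

```python
# Minimal environment check: confirm the versions pinned above are installed.
# Illustrative only; not part of the Fawkes repository.
import keras
import numpy
import tensorflow as tf

print("keras:", keras.__version__)       # expected 2.3.1
print("numpy:", numpy.__version__)       # expected 1.18.4
print("tensorflow:", tf.__version__)     # expected 1.13.1 (tensorflow-gpu imports as `tensorflow`)
```
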
### HOWTO

#### Download and Config Datasets

The first step is to download several datasets for protection and target selection.

1. Download the following datasets to your local machine. After downloading, restructure each dataset the same way as the downloaded FaceScrub dataset.

   - FaceScrub -- used for protection evaluation (link)
   - VGGFace1 -- used for target selection (link)
   - VGGFace2 -- used for target selection (link)
   - WebFace -- used for target selection (link)

2. Config datasets

   Open `fawkes/config.py` and update the `DATASETS` dictionary with the path to each dataset (a sketch of the edited dictionary follows this list), then run `python fawkes/config.py`. Every time the datasets are updated or moved, remember to rerun the command with the updated paths.

3. Calculate embeddings using the feature extractor.

   Run `python3 fawkes/prepare_feature_extractor.py --candidate-datasets scrub vggface1 vggface2 webface`. This will calculate and cache the embeddings using the default feature extractor we provide. To use a customized feature extractor, please look at the Advanced section at the end.
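
The `DATASETS` dictionary referenced in step 2 simply maps a short dataset name to its local path. Below is a minimal sketch of what the edited dictionary might look like; the key names follow the `--candidate-datasets` flags above, the `scrub` path mirrors the repository default, and the remaining paths are placeholders for your own machine.

```python
# fawkes/config.py (sketch): map each dataset name to where you unpacked it.
# "scrub" mirrors the repository default; the other paths are placeholders.
DATASETS = {
    "scrub": "../data/scrub/",
    "vggface1": "/path/to/vggface1/",
    "vggface2": "/path/to/vggface2/",
    "webface": "/path/to/webface/",
}
```

After editing the paths, rerun `python fawkes/config.py` so the cached configuration is regenerated.
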
#### Generate Cloak for Images

To generate cloaks, run

`python3 fawkes/protection.py --gpu 0 --dataset scrub --feature-extractor webface_dense_robust_extract`

For more information about the detailed parameters, please read `fawkes/protection.py`.

The code will output a directory in `results/` with `cloak_data.p` inside. You can check the cloaked images or inspect the changes in `this notebook`.
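
If you prefer to poke at the output programmatically rather than through the notebook, `cloak_data.p` can be opened as an ordinary pickle. The sketch below only inspects the object: the directory name is just an example of the `<dataset>_<feature-extractor>_protect<class>` naming used by `protection.py`, and the exact attributes of the pickled object may differ, so list them before relying on any.

```python
# Inspect the protection output (a sketch). Assumptions: cloak_data.p is a
# standard Python pickle written by fawkes/protection.py, and the directory
# name below is only an example; run this from inside the repository so the
# pickled class can be resolved during unpickling.
import pickle

result_dir = "results/scrub_webface_dense_robust_extract_protectPatrick_Dempsey"
with open(result_dir + "/cloak_data.p", "rb") as f:
    cloak_data = pickle.load(f)

# Print what the object actually exposes before using any attribute names.
print(type(cloak_data))
print([name for name in dir(cloak_data) if not name.startswith("_")])
```
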
#### Evaluate Cloak Effectiveness

To evaluate the cloak, run `python3 fawkes/eval_cloak.py --gpu 0 --cloak_data PATH-TO-RESULT-DIRECTORY --transfer_model vggface2_inception_extract`.

The code will print out the tracker model's accuracy on the uncloaked/original test images of the protected user, which should be close to 0.
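
Judging from the `eval_cloak.py` changes in the diff below, the evaluation summary is also dumped as a JSON file (e.g. `eval_seed0.json`) inside the cloak result directory, so it can be read back afterwards. A minimal sketch, with an example path:

```python
# Read the evaluation summary written by fawkes/eval_cloak.py (a sketch).
# The file name pattern and keys ('acc_original', 'other_acc') come from the
# diff shown below; the result directory is an example path.
import json
import os

cloak_dir = "results/scrub_webface_dense_robust_extract_protectPatrick_Dempsey"
with open(os.path.join(cloak_dir, "eval_seed0.json")) as f:
    eval_res = json.load(f)

print("accuracy on original images of the protected user:", eval_res["acc_original"])
print("accuracy on other classes:", eval_res["other_acc"])
```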

fawkes/config.py

@@ -3,7 +3,6 @@ import json
 import os
 
 DATASETS = {
-    "pubfig": "../data/pubfig/",
     "scrub": "../data/scrub/",
     "vggface1": "/mnt/data/sixiongshan/data/vggface/",
     # "vggface2": "/mnt/data/sixiongshan/data/vggface2/",

@@ -12,7 +11,6 @@ DATASETS = {
 }
 
 
-
 def main():
     config = {}
     for dataset in DATASETS.keys():

fawkes/eval_cloak.py

@@ -20,8 +20,6 @@ from keras.applications.vgg16 import preprocess_input
 # loc = locale.getlocale()
 # locale.setlocale(locale.LC_ALL, loc)
 
-SEEDS = [12345, 23451, 34512, 45123, 51234, 54321, 43215, 32154, 21543, 15432]
-
 
 def select_samples(data_dir):
     all_data_path = []

@@ -88,9 +86,6 @@ def eval_cloaked_test_data(cloak_data, n_classes, validation_split=0.1):
 
 
 def main():
-    SEED = SEEDS[args.seed_idx]
-    random.seed(SEED)
-    set_random_seed(SEED)
     init_gpu(args.gpu)
 
     if args.dataset == 'pubfig':

@@ -133,36 +128,24 @@ def main():
     print("Accuracy on uncloaked/original images TEST: {:.4f}".format(acc_original))
     EVAL_RES['acc_original'] = acc_original
 
-    _, acc_cloaked = model.evaluate(cloaked_test_X, cloaked_test_Y, verbose=0)
-    print("Accuracy on cloaked images TEST: {:.4f}".format(acc_cloaked))
-    EVAL_RES['acc_cloaked'] = acc_cloaked
-
     _, other_acc = model.evaluate_generator(test_generator, verbose=0, steps=50)
     print("Accuracy on other classes {:.4f}".format(other_acc))
     EVAL_RES['other_acc'] = other_acc
-    dump_dictionary_as_json(EVAL_RES,
-                            os.path.join(CLOAK_DIR,
-                                         "eval_seed{}_th{}_sd{}.json".format(args.seed_idx, args.th, args.sd)))
+    dump_dictionary_as_json(EVAL_RES, os.path.join(CLOAK_DIR, "eval_seed{}.json".format(args.seed_idx)))
 
 
 def parse_arguments(argv):
     parser = argparse.ArgumentParser()
 
     parser.add_argument('--gpu', type=str,
-                        help='GPU id', default='2')
+                        help='GPU id', default='0')
-    parser.add_argument('--seed_idx', type=int,
-                        help='random seed index', default=0)
     parser.add_argument('--dataset', type=str,
                         help='name of dataset', default='scrub')
     parser.add_argument('--cloak_data', type=str,
                         help='name of the cloak result directory',
-                        default='scrub_webface_dense_robust_protectKristen_Alderson')
+                        default='scrub_webface_dense_robust_extract_protectPatrick_Dempsey')
 
-    parser.add_argument('--sd', type=int, default=1e6)
-    parser.add_argument('--th', type=float, default=0.01)
-
     parser.add_argument('--transfer_model', type=str,
-                        help='student model', default='../feature_extractors/vggface2_inception_extract.h5')
+                        help='the feature extractor used for tracker model training. It can be the same or not same as the user\'s', default='vggface2_inception_extract')
     parser.add_argument('--batch_size', type=int, default=32)
     parser.add_argument('--validation_split', type=float, default=0.1)
     parser.add_argument('--n_epochs', type=int, default=5)

@@ -172,4 +155,3 @@ def parse_arguments(argv):
 if __name__ == '__main__':
     args = parse_arguments(sys.argv[1:])
     main()
-# python3 eval_cloak.py --gpu 2 --n_uncloaked 0 --dataset pubfig --model_idx 5 --transfer_model webface_inception

fawkes/protection.py

@@ -16,8 +16,6 @@ set_random_seed(12242)
 NUM_IMG_PROTECTED = 32  # Number of images used to optimize the target class
 BATCH_SIZE = 32
 
-IMG_SHAPE = [224, 224, 3]
-
 MAX_ITER = 1000
 
 

@@ -34,7 +32,7 @@ def diff_protected_data(sess, feature_extractors_ls, image_X, number_protect, ta
                                     verbose=1, maximize=False, keep_final=False, image_shape=image_X.shape[1:])
 
     if len(target_X) < len(image_X):
-        target_X = np.concatenate([target_X, target_X, target_X, target_X, target_X])
+        target_X = np.concatenate([target_X, target_X, target_X])
         target_X = target_X[:len(image_X)]
     cloaked_image_X = differentiator.attack(image_X, target_X)
     return cloaked_image_X

@@ -55,9 +53,8 @@ def perform_defense():
     feature_extractors_ls = [load_extractor(name) for name in FEATURE_EXTRACTORS]
     protect_class = args.protect_class
 
-    cloak_data = CloakData(args.dataset, target_selection_tries=1, protect_class=protect_class)
-    model_name = args.feature_extractor.split("/")[-1].split('.')[0].replace("_extract", "")
-    RES_FILE_NAME = "{}_{}_protect{}".format(args.dataset, model_name, cloak_data.protect_class)
+    cloak_data = CloakData(args.dataset, protect_class=protect_class)
+    RES_FILE_NAME = "{}_{}_protect{}".format(args.dataset, args.feature_extractor, cloak_data.protect_class)
     RES_FILE_NAME = os.path.join(RES_DIR, RES_FILE_NAME)
     print("Protect Class: ", cloak_data.protect_class)
 

@@ -84,7 +81,7 @@ def parse_arguments(argv):
                         help='name of dataset', default='scrub')
     parser.add_argument('--feature-extractor', type=str,
                         help="name of the feature extractor used for optimization",
-                        default="webface_dense_robust")
+                        default="webface_dense_robust_extract")
     parser.add_argument('--th', type=float, default=0.007)
     parser.add_argument('--sd', type=int, default=1e5)
     parser.add_argument('--protect_class', type=str, default=None)