mirror of https://github.com/Shawn-Shan/fawkes.git synced 2024-12-22 07:09:33 +05:30

endpoint api

Former-commit-id: 101c0d4cfbfe62d873a289a1ba1ccb47bdbd66f5 [formerly 57e917cb08f4219a703fbdab6e782490077e8480]
Former-commit-id: 6a0071b5ca45c7651f3aceb952a0eebddfcc6897
Shawn-Shan 2020-07-06 16:52:46 -05:00
parent b1e7b67055
commit 30fa1635a5
8 changed files with 321 additions and 121 deletions

app/fawkes.py (new file, +12 lines)

@@ -0,0 +1,12 @@
import sys

if sys.version_info < (3, 0):
    # Python 2
    import Tkinter as tk
else:
    # Python 3
    import tkinter as tk

root = tk.Tk()
root.title("Sandwich")
tk.Button(root, text="Make me a Sandwich").pack()
tk.mainloop()
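Not part of the commit, but as a quick sanity check: assuming a desktop session where Tk is available, the stub can be launched directly and should pop a single-button "Sandwich" window:

    python app/fawkes.py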

app/setup.py (new file, +12 lines)

@@ -0,0 +1,12 @@
from setuptools import setup

APP = ['Sandwich.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True}

setup(
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
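Not part of the commit: a py2app manifest like this is normally driven with the standard py2app commands below (a sketch; note that APP points at Sandwich.py, so the GUI script is expected to sit next to setup.py under that name):

    python setup.py py2app -A   # alias mode, fast rebuilds for local development
    python setup.py py2app      # full standalone .app bundle under dist/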

fawkes/differentiator.py (modified)

@@ -10,6 +10,7 @@ from decimal import Decimal
import numpy as np
import tensorflow as tf

from .utils import preprocess, reverse_preprocess

@@ -74,6 +75,7 @@ class FawkesMaskGeneration:
        self.input_shape = tuple([self.batch_size] + self.single_shape)

        self.bottleneck_shape = tuple([self.batch_size] + self.single_shape)
        # self.bottleneck_shape = tuple([self.batch_size, bottleneck_model_ls[0].output_shape[-1]])

        # the variable we're going to optimize over
@@ -403,9 +405,9 @@ class FawkesMaskGeneration:
            if all_clear:
                break

-            if iteration != 0 and iteration % (self.MAX_ITERATIONS // 2) == 0:
-                LR = LR / 2
-                print("Learning Rate: ", LR)
+            # if iteration != 0 and iteration % (self.MAX_ITERATIONS // 2) == 0:
+            #     LR = LR / 2
+            #     print("Learning Rate: ", LR)

            if iteration % (self.MAX_ITERATIONS // 5) == 0:
                if self.verbose == 1:

fawkes/protection.py (modified)

@@ -7,6 +7,7 @@ import glob
import os
import random
import sys
+import time

import numpy as np
@@ -17,12 +18,10 @@ from .utils import load_extractor, init_gpu, select_target_label, dump_image, re
random.seed(12243)
np.random.seed(122412)
-BATCH_SIZE = 32

def generate_cloak_images(sess, feature_extractors, image_X, target_emb=None, th=0.01, faces=None, sd=1e9, lr=2,
-                          max_step=500):
-    batch_size = BATCH_SIZE if len(image_X) > BATCH_SIZE else len(image_X)
+                          max_step=500, batch_size=1):
+    batch_size = batch_size if len(image_X) > batch_size else len(image_X)
    differentiator = FawkesMaskGeneration(sess, feature_extractors,
                                          batch_size=batch_size,
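Not part of the commit: the clamp on the first line of generate_cloak_images simply caps the requested batch size at the number of available faces. A minimal illustration with made-up numbers:

    image_X = [None] * 5  # stand-in for five cropped faces
    batch_size = 8        # as requested via --batch-size
    batch_size = batch_size if len(image_X) > batch_size else len(image_X)
    assert batch_size == 5  # never larger than the number of images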
@@ -50,11 +49,11 @@ def check_imgs(imgs):
def main(*argv):
+    start_time = time.time()
    if not argv:
        argv = list(sys.argv)

    # attach SIGPIPE handler to properly handle broken pipe
-    try:  # sigpipe not available under windows. just ignore in this case
+    try:
        import signal
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except Exception as e:
@@ -78,25 +77,34 @@ def main(*argv):
    parser.add_argument('--sd', type=int, default=1e9)
    parser.add_argument('--lr', type=float, default=2)
+    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument('--separate_target', action='store_true')

    parser.add_argument('--format', type=str,
                        help="final image format",
-                        default="jpg")
+                        default="png")

    args = parser.parse_args(argv[1:])

    if args.mode == 'low':
        args.feature_extractor = "high_extract"
        args.th = 0.003
+        args.max_step = 100
+        args.lr = 15
    elif args.mode == 'mid':
        args.feature_extractor = "high_extract"
        args.th = 0.005
+        args.max_step = 100
+        args.lr = 15
    elif args.mode == 'high':
        args.feature_extractor = "high_extract"
        args.th = 0.007
+        args.max_step = 100
+        args.lr = 10
    elif args.mode == 'ultra':
        args.feature_extractor = "high_extract"
        args.th = 0.01
+        args.max_step = 1000
+        args.lr = 5
    elif args.mode == 'custom':
        pass
    else:
@@ -116,7 +124,7 @@ def main(*argv):
        print("No images in the directory")
        exit(1)

-    faces = Faces(image_paths, sess)
+    faces = Faces(image_paths, sess, verbose=1)
    orginal_images = faces.cropped_faces
    orginal_images = np.array(orginal_images)
@@ -133,7 +141,7 @@ def main(*argv):
    protected_images = generate_cloak_images(sess, feature_extractors_ls, orginal_images,
                                             target_emb=target_embedding, th=args.th, faces=faces, sd=args.sd,
-                                             lr=args.lr, max_step=args.max_step)
+                                             lr=args.lr, max_step=args.max_step, batch_size=args.batch_size)

    faces.cloaked_cropped_faces = protected_images
@@ -141,9 +149,12 @@ def main(*argv):
    final_images = faces.merge_faces(cloak_perturbation)

    for p_img, cloaked_img, path in zip(final_images, protected_images, image_paths):
-        file_name = "{}_{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), args.mode, args.th, args.format)
+        file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), args.mode, args.format)
        dump_image(p_img, file_name, format=args.format)

+    elapsed_time = time.time() - start_time
+    print('attack cost %f s' % (elapsed_time))
+
if __name__ == '__main__':
    main(*sys.argv)
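Not part of the commit: with the new flags wired through, a typical run now looks roughly like this (a sketch; --directory/-d is the module's pre-existing input-folder flag, which these hunks do not show):

    python3 -m fawkes.protection -d imgs/ --mode mid --batch-size 8 --format png

Given the simplified file_name template above, imgs/foo.jpg would come out as imgs/foo_mid_cloaked.png.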

fawkes/utils.py (modified)

@@ -4,7 +4,13 @@ import json
import os
import pickle
import random
+import shutil
import sys
+import tarfile
+import zipfile
+
+import six
+from six.moves.urllib.error import HTTPError, URLError

stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
@@ -15,15 +21,40 @@ import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image, ExifTags
# from keras.applications.vgg16 import preprocess_input
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from keras.utils import get_file  # note: shadowed by the local get_file defined below
from skimage.transform import resize
from sklearn.metrics import pairwise_distances

from .align_face import align, aligner
+from six.moves.urllib.request import urlopen
+
+if sys.version_info[0] == 2:
+    # Python 2 fallback: stream the download in chunks, mirroring urlretrieve
+    def urlretrieve(url, filename, reporthook=None, data=None):
+        def chunk_read(response, chunk_size=8192, reporthook=None):
+            content_type = response.info().get('Content-Length')
+            total_size = -1
+            if content_type is not None:
+                total_size = int(content_type.strip())
+            count = 0
+            while True:
+                chunk = response.read(chunk_size)
+                count += 1
+                if reporthook is not None:
+                    reporthook(count, chunk_size, total_size)
+                if chunk:
+                    yield chunk
+                else:
+                    break
+
+        response = urlopen(url, data)
+        with open(filename, 'wb') as fd:
+            for chunk in chunk_read(response, reporthook=reporthook):
+                fd.write(chunk)
+else:
+    from six.moves.urllib.request import urlretrieve

def clip_img(X, preprocessing='raw'):
@@ -57,13 +88,16 @@ def load_image(path):
class Faces(object):
-    def __init__(self, image_paths, sess):
+    def __init__(self, image_paths, sess, verbose=1):
+        self.verbose = verbose
        self.aligner = aligner(sess)
        self.org_faces = []
        self.cropped_faces = []
        self.cropped_faces_shape = []
        self.cropped_index = []
        self.callback_idx = []
+        if verbose:
+            print("Identify {} images".format(len(image_paths)))
        for i, p in enumerate(image_paths):
            cur_img = load_image(p)
            self.org_faces.append(cur_img)
@@ -73,6 +107,9 @@ class Faces(object):
            cur_shapes = [f.shape[:-1] for f in cur_faces]

            cur_faces_square = []
+            if verbose:
+                print("Find {} face(s) in {}".format(len(cur_faces), p.split("/")[-1]))
            for img in cur_faces:
                long_size = max([img.shape[1], img.shape[0]])
                base = np.zeros((long_size, long_size, 3))
@@ -270,7 +307,7 @@ def imagenet_reverse_preprocessing(x, data_format=None):
def reverse_process_cloaked(x, preprocess='imagenet'):
-    x = clip_img(x, preprocess)
+    # x = clip_img(x, preprocess)
    return reverse_preprocess(x, preprocess)
@@ -286,17 +323,18 @@ def load_extractor(name):
    model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
    os.makedirs(model_dir, exist_ok=True)
    model_file = os.path.join(model_dir, "{}.h5".format(name))
    emb_file = os.path.join(model_dir, "{}_emb.p.gz".format(name))
    if os.path.exists(model_file):
        model = keras.models.load_model(model_file)
    else:
        get_file("{}.h5".format(name), "http://sandlab.cs.uchicago.edu/fawkes/files/{}.h5".format(name),
                 cache_dir=model_dir, cache_subdir='')
        model = keras.models.load_model(model_file)
    if not os.path.exists(emb_file):
        get_file("{}_emb.p.gz".format(name), "http://sandlab.cs.uchicago.edu/fawkes/files/{}_emb.p.gz".format(name),
                 cache_dir=model_dir, cache_subdir='')
        model = keras.models.load_model(model_file)

    if hasattr(model.layers[-1], "activation") and model.layers[-1].activation == "softmax":
        raise Exception(
            "Given extractor's last layer is softmax, need to remove the top layers to make it into a feature extractor")
@@ -404,12 +442,18 @@ def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, m
    max_id = np.argmax(max_sum)
    target_data_id = paths[int(max_id)]
-    image_dir = os.path.join(model_dir, "target_data/{}/*".format(target_data_id))
-    if not os.path.exists(image_dir):
-        get_file("{}.h5".format(name), "http://sandlab.cs.uchicago.edu/fawkes/files/target_images".format(name),
-                 cache_dir=model_dir, cache_subdir='')
+    image_dir = os.path.join(model_dir, "target_data/{}".format(target_data_id))
+    # if not os.path.exists(image_dir):
+    os.makedirs(os.path.join(model_dir, "target_data"), exist_ok=True)
+    os.makedirs(image_dir, exist_ok=True)
+    for i in range(10):
+        if os.path.exists(os.path.join(model_dir, "target_data/{}/{}.jpg".format(target_data_id, i))):
+            continue
+        get_file("{}.jpg".format(i),
+                 "http://sandlab.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format(target_data_id, i),
+                 cache_dir=model_dir, cache_subdir='target_data/{}/'.format(target_data_id))

-    image_paths = glob.glob(image_dir)
+    image_paths = glob.glob(image_dir + "/*.jpg")
    target_images = [image.img_to_array(image.load_img(cur_path)) for cur_path in
                     image_paths]
@@ -424,6 +468,107 @@ def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, m
    target_images = random.sample(target_images, len(imgs))

    return np.array(target_images)

# This appears to be a trimmed-down copy of keras.utils.get_file; note the hash
# arguments are accepted for API compatibility but are never verified here.
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
    if cache_dir is None:
        cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
    if md5_hash is not None and file_hash is None:
        file_hash = md5_hash
        hash_algorithm = 'md5'
    datadir_base = os.path.expanduser(cache_dir)
    if not os.access(datadir_base, os.W_OK):
        # fall back to a world-writable location when the cache dir is read-only
        datadir_base = os.path.join('/tmp', '.keras')
    datadir = os.path.join(datadir_base, cache_subdir)
    _makedirs_exist_ok(datadir)

    if untar:
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + '.tar.gz'
    else:
        fpath = os.path.join(datadir, fname)

    download = False
    if not os.path.exists(fpath):
        download = True

    if download:
        error_msg = 'URL fetch failure on {}: {} -- {}'
        dl_progress = None
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt) as e:
            # don't leave a partial download behind
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        # ProgressTracker.progbar = None

    if untar:
        if not os.path.exists(untar_fpath):
            _extract_archive(fpath, datadir, archive_format='tar')
        return untar_fpath

    if extract:
        _extract_archive(fpath, datadir, archive_format)

    return fpath
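Not part of the commit: a usage sketch of the vendored helper, mirroring the call sites in load_extractor and select_target_label above:

    model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
    path = get_file("high_extract.h5",
                    "http://sandlab.cs.uchicago.edu/fawkes/files/high_extract.h5",
                    cache_dir=model_dir, cache_subdir='')  # returns the cached local path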
def _extract_archive(file_path, path='.', archive_format='auto'):
    if archive_format is None:
        return False
    if archive_format == 'auto':
        archive_format = ['tar', 'zip']
    if isinstance(archive_format, six.string_types):
        archive_format = [archive_format]

    for archive_type in archive_format:
        if archive_type == 'tar':
            open_fn = tarfile.open
            is_match_fn = tarfile.is_tarfile
        if archive_type == 'zip':
            open_fn = zipfile.ZipFile
            is_match_fn = zipfile.is_zipfile

        if is_match_fn(file_path):
            with open_fn(file_path) as archive:
                try:
                    archive.extractall(path)
                except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
                    # clean up whatever was partially extracted
                    if os.path.exists(path):
                        if os.path.isfile(path):
                            os.remove(path)
                        else:
                            shutil.rmtree(path)
                    raise
            return True
    return False
def _makedirs_exist_ok(datadir):
    if six.PY2:
        # Python 2 doesn't have the exist_ok arg, so we try-except here.
        import errno  # editor's fix: errno is referenced below but not imported at module top
        try:
            os.makedirs(datadir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    else:
        os.makedirs(datadir, exist_ok=True)  # pylint: disable=unexpected-keyword-arg

# class CloakData(object):
#     def __init__(self, protect_directory=None, img_shape=(224, 224)):
#

fawkes_dev/azure.py (modified)

@@ -1,14 +1,28 @@
-import http.client, urllib.request, urllib.parse, urllib.error
+import http.client
import json
import random
import time
+import urllib.error
+import urllib.parse
+import urllib.request
+
+import requests

+# Face API Key and Endpoint
+f = open('api_key.txt', 'r')
+data = f.read().split("\n")
+subscription_key = data[0]
+uri_base = data[1]
+
+cloak_image_base = 'http://sandlab.cs.uchicago.edu/fawkes/files/cloak/{}_ultra_cloaked.png'
+original_image_base = 'http://sandlab.cs.uchicago.edu/fawkes/files/cloak/{}.png'

-#Face API Key and Endpoint
-subscription_key = 'e127e26e4d534e2bad6fd9ca06145302'
-uri_base = 'eastus.api.cognitive.microsoft.com'
-# uri_base = 'https://shawn.cognitiveservices.azure.com/'

def detect_face(image_url):
+    r = requests.get(image_url)
+    if r.status_code != 200:
+        return None
+
    headers = {
        # Request headers
        'Content-Type': 'application/json',
@@ -32,6 +46,7 @@ def detect_face(image_url):
    conn.request("POST", "/face/v1.0/detect?%s" % params, body, headers)
    response = conn.getresponse()
    data = json.loads(response.read())
+    print(data)
    conn.close()
    return data[0]["faceId"]
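Not part of the commit: after this change, detect_face returns None when the image URL is unreachable; otherwise it returns data[0]["faceId"] (and would raise IndexError if Azure detects no face). A quick sketch:

    face_id = detect_face(original_image_base.format(0))
    if face_id is None:
        print("image URL unreachable")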
@@ -102,12 +117,16 @@ def create_personId(personGroupId, personName):
    conn.request("POST", "/face/v1.0/persongroups/{}/persons?%s".format(personGroupId) % params, body, headers)
    response = conn.getresponse()
    data = json.loads(response.read())
-    print(data)
+    # print(data)
    conn.close()
    return data["personId"]

def add_persistedFaceId(personGroupId, personId, image_url):
+    r = requests.get(image_url)
+    if r.status_code != 200:
+        return None
+
    headers = {
        'Content-Type': 'application/json',
        'Ocp-Apim-Subscription-Key': subscription_key,
@@ -123,11 +142,14 @@ def add_persistedFaceId(personGroupId, personId, image_url):
    })

    conn = http.client.HTTPSConnection(uri_base)
-    conn.request("POST", "/face/v1.0/persongroups/{}/persons/{}/persistedFaces?%s".format(personGroupId, personId) % params, body, headers)
+    conn.request("POST",
+                 "/face/v1.0/persongroups/{}/persons/{}/persistedFaces?%s".format(personGroupId, personId) % params,
+                 body, headers)
    response = conn.getresponse()
    data = json.loads(response.read())
    print(data)
    conn.close()
+    if "persistedFaceId" not in data:
+        return None
    return data["persistedFaceId"]
@@ -161,7 +183,8 @@ def get_personGroupPerson(personGroupId, personId):
    body = json.dumps({})

    conn = http.client.HTTPSConnection(uri_base)
-    conn.request("GET", "/face/v1.0/persongroups/{}/persons/{}?%s".format(personGroupId, personId) % params, body, headers)
+    conn.request("GET", "/face/v1.0/persongroups/{}/persons/{}?%s".format(personGroupId, personId) % params, body,
+                 headers)
    response = conn.getresponse()
    data = json.loads(response.read())
    print(data)
@@ -208,6 +231,7 @@ def eval(original_faceIds, personGroupId, protect_personId):
    conn.close()

    face = data[0]
+    print(face)
    if len(face["candidates"]) and face["candidates"][0]["personId"] == protect_personId:
        return True
    else:
@@ -225,48 +249,20 @@ def delete_personGroupPerson(personGroupId, personId):
    body = json.dumps({})

    conn = http.client.HTTPSConnection(uri_base)
-    conn.request("DELETE", "/face/v1.0/persongroups/{}/persons/{}?%s".format(personGroupId, personId) % params, body, headers)
+    conn.request("DELETE", "/face/v1.0/persongroups/{}/persons/{}?%s".format(personGroupId, personId) % params, body,
+                 headers)
    response = conn.getresponse()
    data = response.read()
    print(data)
    conn.close()

-def add_protect_person(personGroupId, name):
-    personId = create_personId(personGroupId, name)
-    for idx in range(72):
-        cloaked_image_url = "https://super.cs.uchicago.edu/~shawn/cloaked/{}_c.png".format(idx)
-        add_persistedFaceId(personGroupId, personId, cloaked_image_url)

-def add_sybil_person(personGroupId, name):
-    personId = create_personId(personGroupId, name)
-    for idx in range(82):
-        try:
-            cloaked_image_url = "https://super.cs.uchicago.edu/~shawn/sybils/{}_c.png".format(idx)
-            add_persistedFaceId(personGroupId, personId, cloaked_image_url)
-        except:
-            print(idx)

-def add_other_person(personGroupId):
-    for idx_person in range(65):
-        personId = create_personId(personGroupId, str(idx_person))
-        for idx_image in range(90):
-            try:
-                image_url = "https://super.cs.uchicago.edu/~shawn/train/{}/{}.png".format(idx_person, idx_image)
-                add_persistedFaceId(personGroupId, personId, image_url)
-            except:
-                print(idx_person, idx_image)

def get_trainStatus(personGroupId):
    headers = {
        'Ocp-Apim-Subscription-Key': subscription_key,
    }
-    params = urllib.parse.urlencode({
-    })
+    params = urllib.parse.urlencode({})
    body = json.dumps({})
@@ -278,47 +274,74 @@ def get_trainStatus(personGroupId):
    conn.close()

def test_original():
    personGroupId = 'pubfig'
    # create_personGroupId(personGroupId, 'pubfig')
    # add protect person
    protect_personId = 'd3df3012-6f3f-4c1b-b86d-55e91a352e01'
    # protect_personId = create_personId(personGroupId, 'Emily')
    # for idx in range(50):
    #     image_url = "https://super.cs.uchicago.edu/~shawn/cloaked/{}_o.png".format(idx)
    #     add_persistedFaceId(personGroupId, protect_personId, image_url)

def test_cloak():
    NUM_TRAIN = 10
    total_idx = range(0, 82)
    TRAIN_RANGE = random.sample(total_idx, NUM_TRAIN)

    TEST_RANGE = TRAIN_RANGE

    personGroupId = 'all'
    # delete_personGroup(personGroupId)
    create_personGroupId(personGroupId, personGroupId)

    with open("protect_personId.txt", 'r') as f:
        protect_personId = f.read()
    print(protect_personId)
    delete_personGroupPerson(personGroupId, protect_personId)
    protect_personId = create_personId(personGroupId, 'Emily')
    with open("protect_personId.txt", 'w') as f:
        f.write(protect_personId)
    print("Created protect personId: {}".format(protect_personId))

    for idx in TRAIN_RANGE:
        image_url = cloak_image_base.format(idx)
        r = add_persistedFaceId(personGroupId, protect_personId, image_url)
        if r is not None:
            print("Added {}".format(idx))
        else:
            print("Unable to add {}-th image of protect person".format(idx))

    # add other people
-    # for idx_person in range(65):
-    #     personId = create_personId(personGroupId, str(idx_person))
-    #     for idx_image in range(50):
-    #         try:
-    #             image_url = "https://super.cs.uchicago.edu/~shawn/train/{}/{}.png".format(idx_person, idx_image)
-    #             add_persistedFaceId(personGroupId, personId, image_url)
-    #         except:
-    #             print(idx_person, idx_image)
+    for idx_person in range(500):
+        personId = create_personId(personGroupId, str(idx_person))
+        print("Created personId: {}".format(idx_person))
+        for idx_image in range(10):
+            image_url = "http://sandlab.cs.uchicago.edu/fawkes/files/target_data/{}/{}.jpg".format(
+                idx_person, idx_image)
+            r = add_persistedFaceId(personGroupId, personId, image_url)
+            if r is not None:
+                print("Added {}".format(idx_image))
+            else:
+                print("Unable to add {}-th image".format(idx_image))

    # train model based on personGroup
-    # train_personGroup(personGroupId)
-    # time.sleep(3)
-    # get_trainStatus(personGroupId)
-    # list_personGroupPerson(personGroupId)
+    train_personGroup(personGroupId)
+    time.sleep(4)
+    get_trainStatus(personGroupId)
+    # list_personGroupPerson(personGroupId)

-    idx_range = range(50, 82)
    # test original image
+    idx_range = TEST_RANGE
    acc = 0.
    tot = 0.
    for idx in idx_range:
-        original_image_url = "https://super.cs.uchicago.edu/~shawn/cloaked/{}_o.png".format(idx)
+        original_image_url = original_image_base.format(idx)
        faceId = detect_face(original_image_url)
        if faceId is None:
            print("{} does not exist".format(idx))
            continue
        original_faceIds = [faceId]

        # verify
        res = eval(original_faceIds, personGroupId, protect_personId)
        if res:
            acc += 1.
        tot += 1.

-    acc /= len(idx_range)
+    acc /= tot
    print(acc)  # 1.0
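Not part of the commit: a compressed summary of the test_cloak flow above, for orientation:

    # 1. recreate person group 'all' and a fresh protected person ('Emily')
    # 2. enroll NUM_TRAIN cloaked photos ({}_ultra_cloaked.png) for that person
    # 3. enroll 10 photos each for 500 decoy identities from target_data
    # 4. train the group, then probe with the matching *uncloaked* originals ({}.png)
    # 5. acc = probes matched back to the protected person / probes detected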
@@ -358,42 +381,37 @@ def delete_personGroup(personGroupId):
    conn.close()

def main():
+    test_cloak()
+    # delete_personGroup('cloaking')
+    # delete_personGroup('cloaking-emily')
+    # delete_personGroup('pubfig')
+    # list_personGroups()
+    # exit()
-    personGroupId = 'cloaking'
+    # personGroupId = 'cloaking'
    # create_personGroupId(personGroupId, 'cloaking')
-    list_personGroups()
-    exit()
-    # delete_personGroupPerson(personGroupId, '0ac606cd-24b3-440f-866a-31adf2a1b446')
-    # add_protect_person(personGroupId, 'Emily')
-    # personId = create_personId(personGroupId, 'Emily')
-    # add_sybil_person(personGroupId, 'sybil')
-    protect_personId = '6c5a71eb-f39a-4570-b3f5-72cca3ab5a6b'
-    # delete_personGroupPerson(personGroupId, protect_personId)
-    # add_protect_person(personGroupId, 'Emily')
-    # train model based on personGroup
-    # train_personGroup(personGroupId)
-    get_trainStatus(personGroupId)
-    # add_other_person(personGroupId)
-    # list_personGroupPerson(personGroupId)
-    # delete_personGroupPerson(personGroupId, '80e32c80-bc69-416a-9dff-c8d42d7a3301')
-    idx_range = range(72, 82)
-    original_faceIds = []
-    for idx in idx_range:
-        original_image_url = "https://super.cs.uchicago.edu/~shawn/cloaked/{}_o.png".format(idx)
-        faceId = detect_face(original_image_url)
-        original_faceIds.append(faceId)
-    # verify
-    eval(original_faceIds, personGroupId, protect_personId)
+    # delete_personGroupPerson(personGroupId, '0ac606cd-24b3-440f-866a-31adf2a1b446')
+    # add_protect_person(personGroupId, 'Emily')
+    # protect_personId = create_personId(personGroupId, 'Emily')
+    # add_sybil_person(personGroupId, 'sybil')
+    #
+    # # train model based on personGroup
+    # train_personGroup(personGroupId)
+    # get_trainStatus(personGroupId)
+    # add_other_person(personGroupId)
+    # list_personGroupPerson(personGroupId)
+    #
+    # idx_range = range(72, 82)
+    # original_faceIds = []
+    # for idx in idx_range:
+    #     original_image_url = "https://super.cs.uchicago.edu/~shawn/cloaked/{}_o.png".format(idx)
+    #     faceId = detect_face(original_image_url)
+    #     original_faceIds.append(faceId)
+    #
+    # # verify
+    # eval(original_faceIds, personGroupId, protect_personId)
if __name__ == '__main__':
-    main()
+    test_cloak()