Former-commit-id: 268fb7e6825ddfc1165fa7adc7c216f9d61005da [formerly 06376993a831c060c337ec6e7540252f0b2dfe09]
Former-commit-id: c4812d40187a76a878e7d215d22ee84811b41896
Shawn-Shan 2020-07-01 21:16:03 -05:00
parent 3ba2abacf2
commit 889fd933e8
19 changed files with 1647 additions and 1099 deletions

@@ -0,0 +1 @@
58d500da850206b845bdd0150fa182a0ff8c50f0

fawkes/align_face.py (new file, 80 lines)

@@ -0,0 +1,80 @@
import detect_face
import numpy as np
import tensorflow as tf

# modify the default parameters of np.load so that the pickled MTCNN
# weight files can be deserialized under newer numpy versions
np_load_old = np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)


def to_rgb(img):
    w, h = img.shape
    ret = np.empty((w, h, 3), dtype=np.uint8)
    ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
    return ret


def aligner(sess):
    pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    return [pnet, rnet, onet]


def align(orig_img, aligner, margin=0.8, detect_multiple_faces=True):
    pnet, rnet, onet = aligner
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three stages' score thresholds
    factor = 0.709  # scale factor

    if orig_img.ndim < 2:
        return None
    if orig_img.ndim == 2:
        orig_img = to_rgb(orig_img)
    orig_img = orig_img[:, :, 0:3]

    bounding_boxes, _ = detect_face.detect_face(orig_img, minsize, pnet, rnet, onet, threshold, factor)
    nrof_faces = bounding_boxes.shape[0]
    if nrof_faces > 0:
        det = bounding_boxes[:, 0:4]
        det_arr = []
        img_size = np.asarray(orig_img.shape)[0:2]
        if nrof_faces > 1:
            # shrink the margin when several faces share the image
            margin = margin / 1.5
            if detect_multiple_faces:
                for i in range(nrof_faces):
                    det_arr.append(np.squeeze(det[i]))
            else:
                # keep only the largest, most central face
                bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
                img_center = img_size / 2
                offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
                                     (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
                offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
                index = np.argmax(bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
                det_arr.append(det[index, :])
        else:
            det_arr.append(np.squeeze(det))

        cropped_arr = []
        bounding_boxes_arr = []
        for i, det in enumerate(det_arr):
            det = np.squeeze(det)
            bb = np.zeros(4, dtype=np.int32)
            # grow the detection box by `margin` on each side, clipped to the image
            side_1 = int((det[2] - det[0]) * margin)
            side_2 = int((det[3] - det[1]) * margin)
            bb[0] = np.maximum(det[0] - side_1 / 2, 0)
            bb[1] = np.maximum(det[1] - side_1 / 2, 0)
            bb[2] = np.minimum(det[2] + side_2 / 2, img_size[1])
            bb[3] = np.minimum(det[3] + side_2 / 2, img_size[0])
            cropped = orig_img[bb[1]:bb[3], bb[0]:bb[2], :]
            cropped_arr.append(cropped)
            bounding_boxes_arr.append([bb[0], bb[1], bb[2], bb[3]])
        # scaled = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        return cropped_arr, bounding_boxes_arr
    else:
        return None

#
# if __name__ == '__main__':
#     orig_img = misc.imread('orig_img.jpeg')
#     cropped_arr, bounding_boxes_arr = align(orig_img)
#     misc.imsave('test_output.jpeg', cropped_arr[0])
#     print(bounding_boxes_arr)
#
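For reference, a minimal driver showing how these two helpers fit together. This is a sketch, not part of the commit: it assumes TF 1.x, the MTCNN weight files under weights/ next to detect_face.py, and a hypothetical input path.

# Hypothetical usage of aligner()/align(); not part of the commit.
import numpy as np
import tensorflow as tf
from PIL import Image

import align_face

with tf.Session() as sess:
    mtcnn = align_face.aligner(sess)  # build PNet/RNet/ONet once per session
    img = np.asarray(Image.open("my_photo.jpg").convert("RGB"))
    result = align_face.align(img, mtcnn, margin=0.7)
    if result is not None:
        crops, boxes = result  # one crop and one [x1, y1, x2, y2] box per face
        print("%d face(s) found" % len(crops))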

fawkes/detect_face.py (new file, 794 lines)

@@ -0,0 +1,794 @@
""" Tensorflow implementation of the face detection / alignment algorithm found at
https://github.com/kpzhang93/MTCNN_face_detection_alignment
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# from math import floor
import cv2
import numpy as np
import tensorflow as tf
from six import string_types, iteritems
def layer(op):
"""Decorator for composable network layers."""
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
"""Construct the network. """
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
"""Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
"""
data_dict = np.load(data_path, encoding='latin1').item() # pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iteritems(data_dict[op_name]):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
"""Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
"""
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, string_types):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
"""Returns the current network output."""
return self.terminals[-1]
def get_unique_name(self, prefix):
"""Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
"""
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
"""Creates a new TensorFlow variable."""
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
"""Verifies that the padding is one of the supported ones."""
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = int(inp.get_shape()[-1])
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var('alpha', shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= int(d)
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
"""
Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension
"""
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keepdims=True)
target_exp = tf.exp(target - max_axis)
normalize = tf.reduce_sum(target_exp, axis, keepdims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='PReLU1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='PReLU2')
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='PReLU3')
.conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
.softmax(3, name='prob1'))
(self.feed('PReLU3') # pylint: disable=no-value-for-parameter
.conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.fc(128, relu=False, name='conv4')
.prelu(name='prelu4')
.fc(2, relu=False, name='conv5-1')
.softmax(1, name='prob1'))
(self.feed('prelu4') # pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv5-2'))
class ONet(Network):
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
.prelu(name='prelu4')
.fc(256, relu=False, name='conv5')
.prelu(name='prelu5')
.fc(2, relu=False, name='conv6-1')
.softmax(1, name='prob1'))
(self.feed('prelu5') # pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv6-2'))
(self.feed('prelu5') # pylint: disable=no-value-for-parameter
.fc(10, relu=False, name='conv6-3'))
def create_mtcnn(sess, model_path):
if not model_path:
model_path, _ = os.path.split(os.path.realpath(__file__))
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None, None, None, 3), 'input')
pnet = PNet({'data': data})
pnet.load(os.path.join(model_path, 'weights/det1.npy'), sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None, 24, 24, 3), 'input')
rnet = RNet({'data': data})
rnet.load(os.path.join(model_path, 'weights/det2.npy'), sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None, 48, 48, 3), 'input')
onet = ONet({'data': data})
onet.load(os.path.join(model_path, 'weights/det3.npy'), sess)
pnet_fun = lambda img: sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0': img})
rnet_fun = lambda img: sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0': img})
onet_fun = lambda img: sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'),
feed_dict={'onet/input:0': img})
return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
"""Detects faces in an image, and returns bounding boxes and points for them.
img: input image
minsize: minimum faces' size
pnet, rnet, onet: caffemodel
threshold: threshold=[th1, th2, th3]; th1-th3 are the score thresholds for the three detection stages
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
factor_count = 0
total_boxes = np.empty((0, 9))
points = np.empty(0)
h = img.shape[0]
w = img.shape[1]
minl = np.amin([h, w])
m = 12.0 / minsize
minl = minl * m
# create scale pyramid
scales = []
while minl >= 12:
scales += [m * np.power(factor, factor_count)]
minl = minl * factor
factor_count += 1
# first stage
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox > 0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick, :]
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick, :]
points = points[:, pick]
return total_boxes, points
def bulk_detect_face(images, detection_window_size_ratio, pnet, rnet, onet, threshold, factor):
"""Detects faces in a list of images
images: list containing input images
detection_window_size_ratio: ratio of minimum face size to smallest image dimension
pnet, rnet, onet: caffemodel
threshold: threshold=[th1, th2, th3]; th1-th3 are the score thresholds for the three detection stages, each in [0, 1]
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
all_scales = [None] * len(images)
images_with_boxes = [None] * len(images)
for i in range(len(images)):
images_with_boxes[i] = {'total_boxes': np.empty((0, 9))}
# create scale pyramid
for index, img in enumerate(images):
all_scales[index] = []
h = img.shape[0]
w = img.shape[1]
minsize = int(detection_window_size_ratio * np.minimum(w, h))
factor_count = 0
minl = np.amin([h, w])
if minsize <= 12:
minsize = 12
m = 12.0 / minsize
minl = minl * m
while minl >= 12:
all_scales[index].append(m * np.power(factor, factor_count))
minl = minl * factor
factor_count += 1
# # # # # # # # # # # # #
# first stage - fast proposal network (pnet) to obtain face candidates
# # # # # # # # # # # # #
images_obj_per_resolution = {}
# TODO: use some type of rounding to a number modulo 8 to increase the probability that pyramid images will have the same resolution across input images
for index, scales in enumerate(all_scales):
h = images[index].shape[0]
w = images[index].shape[1]
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
if (ws, hs) not in images_obj_per_resolution:
images_obj_per_resolution[(ws, hs)] = []
im_data = imresample(images[index], (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_y = np.transpose(im_data, (1, 0, 2)) # caffe uses different dimensions ordering
images_obj_per_resolution[(ws, hs)].append({'scale': scale, 'image': img_y, 'index': index})
for resolution in images_obj_per_resolution:
images_per_resolution = [i['image'] for i in images_obj_per_resolution[resolution]]
outs = pnet(images_per_resolution)
for index in range(len(outs[0])):
scale = images_obj_per_resolution[resolution][index]['scale']
image_index = images_obj_per_resolution[resolution][index]['index']
out0 = np.transpose(outs[0][index], (1, 0, 2))
out1 = np.transpose(outs[1][index], (1, 0, 2))
boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
images_with_boxes[image_index]['total_boxes'] = np.append(images_with_boxes[image_index]['total_boxes'],
boxes,
axis=0)
for index, image_obj in enumerate(images_with_boxes):
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0]
regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1]
qq1 = image_obj['total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw
qq2 = image_obj['total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh
qq3 = image_obj['total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw
qq4 = image_obj['total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh
image_obj['total_boxes'] = np.transpose(np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
image_obj['total_boxes'][:, 0:4] = np.fix(image_obj['total_boxes'][:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
numbox = image_obj['total_boxes'].shape[0]
tempimg = np.zeros((24, 24, 3, numbox))
if numbox > 0:
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
# # # # # # # # # # # # #
# second stage - refinement of face candidates with rnet
# # # # # # # # # # # # #
bulk_rnet_input = np.empty((0, 24, 24, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' in image_obj:
bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0)
out = rnet(bulk_rnet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
i = 0
for index, image_obj in enumerate(images_with_boxes):
if 'rnet_input' not in image_obj:
continue
rnet_input_count = image_obj['rnet_input'].shape[0]
score_per_image = score[i:i + rnet_input_count]
out0_per_image = out0[:, i:i + rnet_input_count]
ipass = np.where(score_per_image > threshold[1])
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
if image_obj['total_boxes'].shape[0] > 0:
h = images[index].shape[0]
w = images[index].shape[1]
pick = nms(image_obj['total_boxes'], 0.7, 'Union')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick]))
image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
numbox = image_obj['total_boxes'].shape[0]
if numbox > 0:
tempimg = np.zeros((48, 48, 3, numbox))
image_obj['total_boxes'] = np.fix(image_obj['total_boxes']).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
return np.empty(0)
tempimg = (tempimg - 127.5) * 0.0078125
image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
i += rnet_input_count
# # # # # # # # # # # # #
# third stage - further refinement and facial landmarks positions with onet
# # # # # # # # # # # # #
bulk_onet_input = np.empty((0, 48, 48, 3))
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' in image_obj:
bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0)
out = onet(bulk_onet_input)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
i = 0
ret = []
for index, image_obj in enumerate(images_with_boxes):
if 'onet_input' not in image_obj:
ret.append(None)
continue
onet_input_count = image_obj['onet_input'].shape[0]
out0_per_image = out0[:, i:i + onet_input_count]
score_per_image = score[i:i + onet_input_count]
points_per_image = points[:, i:i + onet_input_count]
ipass = np.where(score_per_image > threshold[2])
points_per_image = points_per_image[:, ipass[0]]
image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
np.expand_dims(score_per_image[ipass].copy(), 1)])
mv = out0_per_image[:, ipass[0]]
w = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] + 1
h = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] + 1
points_per_image[0:5, :] = np.tile(w, (5, 1)) * points_per_image[0:5, :] + np.tile(
image_obj['total_boxes'][:, 0], (5, 1)) - 1
points_per_image[5:10, :] = np.tile(h, (5, 1)) * points_per_image[5:10, :] + np.tile(
image_obj['total_boxes'][:, 1], (5, 1)) - 1
if image_obj['total_boxes'].shape[0] > 0:
image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv))
pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Min')
image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
points_per_image = points_per_image[:, pick]
ret.append((image_obj['total_boxes'], points_per_image))
else:
ret.append(None)
i += onet_input_count
return ret
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox, reg):
"""Calibrate bounding boxes"""
if reg.shape[1] == 1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4]))
return boundingbox
def generateBoundingBox(imap, reg, scale, t):
"""Use heatmap to generate bounding boxes"""
stride = 2
cellsize = 12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
y, x = np.where(imap >= t)
if y.shape[0] == 1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y, x)]
reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
if reg.size == 0:
reg = np.empty((0, 3))
bb = np.transpose(np.vstack([y, x]))
q1 = np.fix((stride * bb + 1) / scale)
q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
return boundingbox, reg
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
s = boxes[:, 4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[0:counter]
return pick
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
"""Compute the padding coordinates (pad the bounding boxes to square)"""
tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:, 0].copy().astype(np.int32)
y = total_boxes[:, 1].copy().astype(np.int32)
ex = total_boxes[:, 2].copy().astype(np.int32)
ey = total_boxes[:, 3].copy().astype(np.int32)
tmp = np.where(ex > w)
edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)
ex[tmp] = w
tmp = np.where(ey > h)
edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)
ey[tmp] = h
tmp = np.where(x < 1)
dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
x[tmp] = 1
tmp = np.where(y < 1)
dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
"""Convert bboxA to square."""
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = np.maximum(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.transpose(np.tile(l, (2, 1)))
return bboxA
def imresample(img, sz):
im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) # @UndefinedVariable
return im_data
# This method is kept for debugging purpose
# h=img.shape[0]
# w=img.shape[1]
# hs, ws = sz
# dx = float(w) / ws
# dy = float(h) / hs
# im_data = np.zeros((hs,ws,3))
# for a1 in range(0,hs):
# for a2 in range(0,ws):
# for a3 in range(0,3):
# im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
# return im_data
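A minimal sketch of how the three-stage cascade above is driven end to end. Hedged assumptions: TF 1.x, the weights/detN.npy files next to the module, and a placeholder image path.

# Hypothetical end-to-end call into detect_face.py; not part of the commit.
import numpy as np
import tensorflow as tf
from PIL import Image

import detect_face

with tf.Session() as sess:
    pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    img = np.asarray(Image.open("photo.jpg").convert("RGB"))
    # minsize=20, per-stage thresholds [0.6, 0.7, 0.7], pyramid factor 0.709
    boxes, points = detect_face.detect_face(
        img, 20, pnet, rnet, onet, [0.6, 0.7, 0.7], 0.709)
    # each row of boxes is [x1, y1, x2, y2, score]; points holds 5 landmarks per face
    for x1, y1, x2, y2, score in boxes:
        print("face (%.0f,%.0f)-(%.0f,%.0f) conf %.2f" % (x1, y1, x2, y2, score))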

fawkes/differentiator.py

@@ -47,7 +47,7 @@ class FawkesMaskGeneration:
                  max_iterations=MAX_ITERATIONS, initial_const=INITIAL_CONST,
                  intensity_range=INTENSITY_RANGE, l_threshold=L_THRESHOLD,
                  max_val=MAX_VAL, keep_final=KEEP_FINAL, maximize=MAXIMIZE, image_shape=IMAGE_SHAPE,
-                 verbose=0, ratio=RATIO, limit_dist=LIMIT_DIST):
+                 verbose=0, ratio=RATIO, limit_dist=LIMIT_DIST, faces=None):

         assert intensity_range in {'raw', 'imagenet', 'inception', 'mnist'}
@@ -69,10 +69,12 @@ class FawkesMaskGeneration:
         self.ratio = ratio
         self.limit_dist = limit_dist
         self.single_shape = list(image_shape)
+        self.faces = faces

         self.input_shape = tuple([self.batch_size] + self.single_shape)
         self.bottleneck_shape = tuple([self.batch_size] + self.single_shape)
+        # self.bottleneck_shape = tuple([self.batch_size, bottleneck_model_ls[0].output_shape[-1]])

         # the variable we're going to optimize over
         self.modifier = tf.Variable(np.zeros(self.input_shape, dtype=np.float32))
@@ -149,8 +151,6 @@ class FawkesMaskGeneration:
                                                  self.dist_raw,
                                                  tf.zeros_like(self.dist_raw)))
         self.dist_sum = tf.reduce_sum(tf.where(self.mask, self.dist, tf.zeros_like(self.dist)))
-        # self.dist_sum = 1e-5 * tf.reduce_sum(self.dist)
-        # self.dist_raw_sum = self.dist_sum

         def resize_tensor(input_tensor, model_input_shape):
             if input_tensor.shape[1:] == model_input_shape or model_input_shape[1] is None:
@@ -171,16 +171,14 @@ class FawkesMaskGeneration:
             self.bottleneck_a = bottleneck_model(cur_aimg_input)

             if self.MIMIC_IMG:
-                # cur_timg_input = resize_tensor(self.timg_input, model_input_shape)
-                # cur_simg_input = resize_tensor(self.simg_input, model_input_shape)
                 cur_timg_input = self.timg_input
                 cur_simg_input = self.simg_input
                 self.bottleneck_t = calculate_direction(bottleneck_model, cur_timg_input, cur_simg_input)
-                # self.bottleneck_t = bottleneck_model(cur_timg_input)
             else:
                 self.bottleneck_t = self.bottleneck_t_raw

             bottleneck_diff = self.bottleneck_t - self.bottleneck_a
             scale_factor = tf.sqrt(tf.reduce_sum(tf.square(self.bottleneck_t), axis=1))
             cur_bottlesim = tf.sqrt(tf.reduce_sum(tf.square(bottleneck_diff), axis=1))
@@ -189,7 +187,6 @@ class FawkesMaskGeneration:

             self.bottlesim += cur_bottlesim
-            # self.bottlesim_push += cur_bottlesim_push_sum
             self.bottlesim_sum += cur_bottlesim_sum

         # sum up the losses
@@ -202,20 +199,13 @@ class FawkesMaskGeneration:
                                                  self.loss,
                                                  tf.zeros_like(self.loss)))
-        # self.loss_sum = self.dist_sum + tf.reduce_sum(self.bottlesim)
-        # import pdb
-        # pdb.set_trace()
-        # self.loss_sum = tf.reduce_sum(tf.where(self.mask, self.loss, tf.zeros_like(self.loss)))

-        # Setup the Adadelta optimizer and keep track of variables
-        # we're creating
         start_vars = set(x.name for x in tf.global_variables())
         self.learning_rate_holder = tf.placeholder(tf.float32, shape=[])

         optimizer = tf.train.AdadeltaOptimizer(self.learning_rate_holder)
         # optimizer = tf.train.AdamOptimizer(self.learning_rate_holder)

-        self.train = optimizer.minimize(self.loss_sum,
-                                        var_list=[self.modifier])
+        self.train = optimizer.minimize(self.loss_sum, var_list=[self.modifier])
         end_vars = tf.global_variables()
         new_vars = [x for x in end_vars if x.name not in start_vars]
@@ -297,6 +287,7 @@ class FawkesMaskGeneration:
         LR = self.learning_rate
         nb_imgs = source_imgs.shape[0]
         mask = [True] * nb_imgs + [False] * (self.batch_size - nb_imgs)
+        # mask = [True] * self.batch_size
         mask = np.array(mask, dtype=np.bool)

         source_imgs = np.array(source_imgs)
@@ -317,19 +308,34 @@ class FawkesMaskGeneration:
             timg_tanh_batch = np.zeros(self.input_shape)
         else:
             timg_tanh_batch = np.zeros(self.bottleneck_shape)

         weights_batch = np.zeros(self.bottleneck_shape)
         simg_tanh_batch[:nb_imgs] = simg_tanh[:nb_imgs]
         timg_tanh_batch[:nb_imgs] = timg_tanh[:nb_imgs]
         weights_batch[:nb_imgs] = weights[:nb_imgs]
         modifier_batch = np.ones(self.input_shape) * 1e-6

-        self.sess.run(self.setup,
-                      {self.assign_timg_tanh: timg_tanh_batch,
-                       self.assign_simg_tanh: simg_tanh_batch,
-                       self.assign_const: CONST,
-                       self.assign_mask: mask,
-                       self.assign_weights: weights_batch,
-                       self.assign_modifier: modifier_batch})
+        temp_images = []
+
+        # set the variables so that we don't have to send them over again
+        if self.MIMIC_IMG:
+            self.sess.run(self.setup,
+                          {self.assign_timg_tanh: timg_tanh_batch,
+                           self.assign_simg_tanh: simg_tanh_batch,
+                           self.assign_const: CONST,
+                           self.assign_mask: mask,
+                           self.assign_weights: weights_batch,
+                           self.assign_modifier: modifier_batch})
+        else:
+            # if directly mimicking a vector, use assign_bottleneck_t_raw
+            # in setup
+            self.sess.run(self.setup,
+                          {self.assign_bottleneck_t_raw: timg_tanh_batch,
+                           self.assign_simg_tanh: simg_tanh_batch,
+                           self.assign_const: CONST,
+                           self.assign_mask: mask,
+                           self.assign_weights: weights_batch,
+                           self.assign_modifier: modifier_batch})

         best_bottlesim = [0] * nb_imgs if self.maximize else [np.inf] * nb_imgs
         best_adv = np.zeros_like(source_imgs)
@@ -347,6 +353,7 @@ class FawkesMaskGeneration:
                      dist_raw_sum,
                      bottlesim_sum / nb_imgs))

+        finished_idx = set()
         try:
             total_distance = [0] * nb_imgs
@@ -369,8 +376,14 @@ class FawkesMaskGeneration:
                     [self.dist_raw,
                      self.bottlesim,
                      self.aimg_input])

+                all_clear = True
                 for e, (dist_raw, bottlesim, aimg_input) in enumerate(
                         zip(dist_raw_list, bottlesim_list, aimg_input_list)):
+                    if e in finished_idx:
+                        continue
+
                     if e >= nb_imgs:
                         break
                     if (bottlesim < best_bottlesim[e] and bottlesim > total_distance[e] * 0.1 and (
@@ -379,40 +392,55 @@ class FawkesMaskGeneration:
                         best_bottlesim[e] = bottlesim
                         best_adv[e] = aimg_input

+                    # if iteration > 20 and (dist_raw >= self.l_threshold or iteration == self.MAX_ITERATIONS - 1):
+                    #     finished_idx.add(e)
+                    #     print("{} finished at dist {}".format(e, dist_raw))
+                    #     best_bottlesim[e] = bottlesim
+                    #     best_adv[e] = aimg_input
+                    #
+                    all_clear = False
+
+                if all_clear:
+                    break
+
-                if iteration != 0 and iteration % (self.MAX_ITERATIONS // 3) == 0:
-                    # LR = LR / 2
+                if iteration != 0 and iteration % (self.MAX_ITERATIONS // 2) == 0:
+                    LR = LR / 2
                     print("Learning Rate: ", LR)

-                if iteration % (self.MAX_ITERATIONS // 10) == 0:
+                if iteration % (self.MAX_ITERATIONS // 5) == 0:
                     if self.verbose == 1:
-                        loss_sum = float(self.sess.run(self.loss_sum))
-                        dist_sum = float(self.sess.run(self.dist_sum))
-                        thresh_over = (dist_sum /
-                                       self.batch_size /
-                                       self.l_threshold *
-                                       100)
                         dist_raw_sum = float(self.sess.run(self.dist_raw_sum))
                         bottlesim_sum = self.sess.run(self.bottlesim_sum)
-                        print('ITER %4d: Total loss: %.4E; perturb: %.6f (%.2f%% over, raw: %.6f); sim: %f'
-                              % (iteration,
-                                 Decimal(loss_sum),
-                                 dist_sum,
-                                 thresh_over,
-                                 dist_raw_sum,
-                                 bottlesim_sum / nb_imgs))
+                        print('ITER %4d perturb: %.5f; sim: %f'
+                              % (iteration, dist_raw_sum / nb_imgs, bottlesim_sum / nb_imgs))
+
+                # protected_images = aimg_input_list
+                #
+                # orginal_images = np.copy(self.faces.cropped_faces)
+                # cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(
+                #     orginal_images)
+                # final_images = self.faces.merge_faces(cloak_perturbation)
+                #
+                # for p_img, img in zip(protected_images, final_images):
+                #     dump_image(reverse_process_cloaked(p_img),
+                #                "/home/shansixioing/fawkes/data/emily/emily_cloaked_cropped{}.png".format(iteration),
+                #                format='png')
+                #
+                #     dump_image(img,
+                #                "/home/shansixioing/fawkes/data/emily/emily_cloaked_{}.png".format(iteration),
+                #                format='png')

         except KeyboardInterrupt:
             pass

         if self.verbose == 1:
             loss_sum = float(self.sess.run(self.loss_sum))
             dist_sum = float(self.sess.run(self.dist_sum))
-            thresh_over = (dist_sum / self.batch_size / self.l_threshold * 100)
             dist_raw_sum = float(self.sess.run(self.dist_raw_sum))
             bottlesim_sum = float(self.sess.run(self.bottlesim_sum))
-            print('END: Total loss: %.4E; perturb: %.6f (%.2f%% over, raw: %.6f); sim: %f'
+            print('END: Total loss: %.4E; perturb: %.6f (raw: %.6f); sim: %f'
                   % (Decimal(loss_sum),
                      dist_sum,
-                     thresh_over,
                      dist_raw_sum,
                      bottlesim_sum / nb_imgs))
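In isolation, the revised iteration bookkeeping amounts to the loop skeleton below. This is a sketch rather than the class itself: step() is a hypothetical stand-in for the sess.run() optimizer step, and the constants mirror the new defaults.

# Sketch of the revised schedule in attack(): the learning rate halves once
# at mid-run, progress prints every fifth of the run, and the loop can exit
# early once every image is marked finished.
MAX_ITERATIONS = 200
LR = 10.0
nb_imgs = 3
finished_idx = set()

def step(lr):
    # stand-in: one optimizer step, returning a per-image result list
    return [None] * nb_imgs

for iteration in range(MAX_ITERATIONS):
    results = step(LR)
    all_clear = True
    for e, res in enumerate(results):
        if e in finished_idx:
            continue
        all_clear = False  # this image is still being optimized
    if all_clear:
        break  # every image finished early
    if iteration != 0 and iteration % (MAX_ITERATIONS // 2) == 0:
        LR = LR / 2  # single mid-run decay (previously // 3 with the halving commented out)
    if iteration % (MAX_ITERATIONS // 5) == 0:
        print("ITER %4d, LR %.2f" % (iteration, LR))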

@@ -0,0 +1 @@
837da51fc1cd7e21f6989badd07c3ccec543833e

fawkes/protection.py

@@ -6,21 +6,16 @@ import sys
 import numpy as np

 from differentiator import FawkesMaskGeneration
-from keras.applications.vgg16 import preprocess_input
-from keras.preprocessing import image
-from skimage.transform import resize
-from tensorflow import set_random_seed
-from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked
+from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, \
+    Faces

 random.seed(12243)
 np.random.seed(122412)
-set_random_seed(12242)

-BATCH_SIZE = 1
-MAX_ITER = 1000
+BATCH_SIZE = 10


-def generate_cloak_images(sess, feature_extractors, image_X, target_X=None, th=0.01):
+def generate_cloak_images(sess, feature_extractors, image_X, target_emb=None, th=0.01, faces=None):
     batch_size = BATCH_SIZE if len(image_X) > BATCH_SIZE else len(image_X)

     differentiator = FawkesMaskGeneration(sess, feature_extractors,
@@ -29,92 +24,117 @@ def generate_cloak_images(sess, feature_extractors, image_X, target_X=None, th=0.01):
                                           intensity_range='imagenet',
                                           initial_const=args.sd,
                                           learning_rate=args.lr,
-                                          max_iterations=MAX_ITER,
+                                          max_iterations=args.max_step,
                                           l_threshold=th,
-                                          verbose=1, maximize=False, keep_final=False, image_shape=image_X.shape[1:])
+                                          verbose=1, maximize=False, keep_final=False, image_shape=image_X.shape[1:],
+                                          faces=faces)

-    cloaked_image_X = differentiator.attack(image_X, target_X)
+    cloaked_image_X = differentiator.attack(image_X, target_emb)
     return cloaked_image_X


 def get_mode_config(mode):
     if mode == 'low':
         args.feature_extractor = "low_extract"
-        # args.th = 0.003
         args.th = 0.001
     elif mode == 'mid':
         args.feature_extractor = "mid_extract"
-        args.th = 0.001
+        args.th = 0.004
     elif mode == 'high':
         args.feature_extractor = "high_extract"
-        args.th = 0.005
+        args.th = 0.004
     elif mode == 'ultra':
         args.feature_extractor = "high_extract"
-        args.th = 0.007
+        args.th = 0.03
     elif mode == 'custom':
         pass
     else:
         raise Exception("mode must be one of 'low', 'mid', 'high', 'ultra', 'custom'")


-def extract_faces(img):
-    # wait on Huiying
-    return preprocess_input(resize(img, (224, 224)))
+def check_imgs(imgs):
+    if np.max(imgs) <= 1 and np.min(imgs) >= 0:
+        imgs = imgs * 255.0
+    elif np.max(imgs) <= 255 and np.min(imgs) >= 0:
+        pass
+    else:
+        raise Exception("Image values must be either in [0, 1] or [0, 255]")
+    return imgs


 def fawkes():
+    assert args.format in ['png', 'jpg', 'jpeg']
+    if args.format == 'jpg':
+        args.format = 'jpeg'
+
     get_mode_config(args.mode)

     sess = init_gpu(args.gpu)
-    feature_extractors_ls = [load_extractor(args.feature_extractor)]
+    # feature_extractors_ls = [load_extractor(args.feature_extractor)]
+    # fs_names = ['mid_extract', 'high_extract']
+    fs_names = [args.feature_extractor]
+    feature_extractors_ls = [load_extractor(name) for name in fs_names]

     image_paths = glob.glob(os.path.join(args.directory, "*"))
     image_paths = [path for path in image_paths if "_cloaked" not in path.split("/")[-1]]

-    orginal_images = [extract_faces(image.img_to_array(image.load_img(cur_path))) for cur_path in
-                      image_paths]
+    faces = Faces(image_paths, sess)
+
+    orginal_images = faces.cropped_faces
     orginal_images = np.array(orginal_images)

-    if args.seperate_target:
-        target_images = []
+    if args.separate_target:
+        target_embedding = []
         for org_img in orginal_images:
             org_img = org_img.reshape([1] + list(org_img.shape))
-            tar_img = select_target_label(org_img, feature_extractors_ls, [args.feature_extractor])
-            target_images.append(tar_img)
-        target_images = np.concatenate(target_images)
+            tar_emb = select_target_label(org_img, feature_extractors_ls, fs_names)
+            target_embedding.append(tar_emb)
+        target_embedding = np.concatenate(target_embedding)
     else:
-        target_images = select_target_label(orginal_images, feature_extractors_ls, [args.feature_extractor])
+        target_embedding = select_target_label(orginal_images, feature_extractors_ls, fs_names)

     protected_images = generate_cloak_images(sess, feature_extractors_ls, orginal_images,
-                                             target_X=target_images, th=args.th)
+                                             target_emb=target_embedding, th=args.th, faces=faces)

-    for p_img, path in zip(protected_images, image_paths):
-        p_img = reverse_process_cloaked(p_img)
-        file_name = "{}_cloaked.jpeg".format(".".join(path.split(".")[:-1]))
-        dump_image(p_img, file_name, format="JPEG")
+    faces.cloaked_cropped_faces = protected_images
+
+    cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(orginal_images)
+    final_images = faces.merge_faces(cloak_perturbation)
+
+    for p_img, cloaked_img, path in zip(final_images, protected_images, image_paths):
+        file_name = "{}_{}_{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), args.mode, args.th,
+                                                    args.feature_extractor, args.format)
+        dump_image(p_img, file_name, format=args.format)
+        #
+        # file_name = "{}_{}_{}_{}_cloaked_cropped.png".format(".".join(path.split(".")[:-1]), args.mode, args.th,
+        #                                                      args.feature_extractor)
+        # dump_image(reverse_process_cloaked(cloaked_img), file_name, format="png")


 def parse_arguments(argv):
     parser = argparse.ArgumentParser()
-    parser.add_argument('--directory', type=str,
+    parser.add_argument('--directory', '-d', type=str,
                         help='directory that contain images for cloaking', default='imgs/')
     parser.add_argument('--gpu', type=str,
                         help='GPU id', default='0')
     parser.add_argument('--mode', type=str,
-                        help='cloak generation mode', default='mid')
+                        help='cloak generation mode', default='high')
     parser.add_argument('--feature-extractor', type=str,
                         help="name of the feature extractor used for optimization",
-                        default="mid_extract")
+                        default="high_extract")
-    parser.add_argument('--th', type=float, default=0.005)
+    parser.add_argument('--th', type=float, default=0.01)
+    parser.add_argument('--max-step', type=int, default=200)
     parser.add_argument('--sd', type=int, default=1e9)
-    parser.add_argument('--lr', type=float, default=1)
+    parser.add_argument('--lr', type=float, default=10)
-    parser.add_argument('--result_directory', type=str, default="../results")
-    parser.add_argument('--seperate_target', action='store_true')
+    parser.add_argument('--separate_target', action='store_true')
+    parser.add_argument('--format', type=str,
+                        help="final image format",
+                        default="jpg")
     return parser.parse_args(argv)
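With these flags in place, a typical invocation of the updated script would look like the following (a sketch; the image directory is whatever you pass with -d, and all defaults are those from the diff above):

python3 protection.py -d imgs/ --mode high --format png --gpu 0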

View File

@ -1,3 +1,5 @@
import glob
import gzip
import json import json
import os import os
import pickle import pickle
@ -7,12 +9,16 @@ import keras
import keras.backend as K import keras.backend as K
import numpy as np import numpy as np
import tensorflow as tf import tensorflow as tf
from align_face import align, aligner
from keras.applications.vgg16 import preprocess_input from keras.applications.vgg16 import preprocess_input
from keras.layers import Dense, Activation from keras.layers import Dense, Activation
from keras.models import Model from keras.models import Model
from keras.preprocessing import image from keras.preprocessing import image
from keras.utils import get_file
from keras.utils import to_categorical from keras.utils import to_categorical
from skimage.transform import resize
from sklearn.metrics import pairwise_distances from sklearn.metrics import pairwise_distances
from PIL import Image, ExifTags
def clip_img(X, preprocessing='raw'): def clip_img(X, preprocessing='raw'):
@ -22,6 +28,86 @@ def clip_img(X, preprocessing='raw'):
return X return X
def load_image(path):
img = Image.open(path)
if img._getexif() is not None:
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
exif = dict(img._getexif().items())
if orientation in exif.keys():
if exif[orientation] == 3:
img = img.rotate(180, expand=True)
elif exif[orientation] == 6:
img = img.rotate(270, expand=True)
elif exif[orientation] == 8:
img = img.rotate(90, expand=True)
else:
pass
img = img.convert('RGB')
image_array = image.img_to_array(img)
return image_array
class Faces(object):
def __init__(self, image_paths, sess):
self.aligner = aligner(sess)
self.org_faces = []
self.cropped_faces = []
self.cropped_faces_shape = []
self.cropped_index = []
self.callback_idx = []
for i, p in enumerate(image_paths):
cur_img = load_image(p)
self.org_faces.append(cur_img)
align_img = align(cur_img, self.aligner, margin=0.7)
cur_faces = align_img[0]
cur_shapes = [f.shape[:-1] for f in cur_faces]
cur_faces_square = []
for img in cur_faces:
long_size = max([img.shape[1], img.shape[0]])
base = np.zeros((long_size, long_size, 3))
base[0:img.shape[0], 0:img.shape[1], :] = img
cur_faces_square.append(base)
cur_index = align_img[1]
cur_faces_square = [resize(f, (224, 224)) for f in cur_faces_square]
self.cropped_faces_shape.extend(cur_shapes)
self.cropped_faces.extend(cur_faces_square)
self.cropped_index.extend(cur_index)
self.callback_idx.extend([i] * len(cur_faces_square))
self.cropped_faces = preprocess_input(np.array(self.cropped_faces))
self.cloaked_cropped_faces = None
self.cloaked_faces = np.copy(self.org_faces)
def get_faces(self):
return self.cropped_faces
def merge_faces(self, cloaks):
# import pdb
# pdb.set_trace()
self.cloaked_faces = np.copy(self.org_faces)
for i in range(len(self.cropped_faces)):
cur_cloak = cloaks[i]
org_shape = self.cropped_faces_shape[i]
old_square_shape = max([org_shape[0], org_shape[1]])
reshape_cloak = resize(cur_cloak, (old_square_shape, old_square_shape))
reshape_cloak = reshape_cloak[0:org_shape[0], 0:org_shape[1], :]
callback_id = self.callback_idx[i]
bb = self.cropped_index[i]
self.cloaked_faces[callback_id][bb[1]:bb[3], bb[0]:bb[2], :] += reshape_cloak
return self.cloaked_faces
def dump_dictionary_as_json(dict, outfile): def dump_dictionary_as_json(dict, outfile):
j = json.dumps(dict) j = json.dumps(dict)
with open(outfile, "wb") as f: with open(outfile, "wb") as f:
@ -30,10 +116,12 @@ def dump_dictionary_as_json(dict, outfile):
def fix_gpu_memory(mem_fraction=1): def fix_gpu_memory(mem_fraction=1):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_fraction) tf_config = None
tf_config = tf.ConfigProto(gpu_options=gpu_options) if tf.test.is_gpu_available():
tf_config.gpu_options.allow_growth = True gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_fraction)
tf_config.log_device_placement = False tf_config = tf.ConfigProto(gpu_options=gpu_options)
tf_config.gpu_options.allow_growth = True
tf_config.log_device_placement = False
init_op = tf.global_variables_initializer() init_op = tf.global_variables_initializer()
sess = tf.Session(config=tf_config) sess = tf.Session(config=tf_config)
sess.run(init_op) sess.run(init_op)
@ -45,7 +133,6 @@ def load_victim_model(number_classes, teacher_model=None, end2end=False):
for l in teacher_model.layers: for l in teacher_model.layers:
l.trainable = end2end l.trainable = end2end
x = teacher_model.layers[-1].output x = teacher_model.layers[-1].output
x = Dense(number_classes)(x) x = Dense(number_classes)(x)
x = Activation('softmax', name="act")(x) x = Activation('softmax', name="act")(x)
model = Model(teacher_model.input, x) model = Model(teacher_model.input, x)
@ -141,6 +228,7 @@ def imagenet_preprocessing(x, data_format=None):
return x return x
def imagenet_reverse_preprocessing(x, data_format=None): def imagenet_reverse_preprocessing(x, data_format=None):
import keras.backend as K import keras.backend as K
x = np.array(x) x = np.array(x)
@ -185,7 +273,20 @@ def build_bottleneck_model(model, cut_off):
def load_extractor(name): def load_extractor(name):
model = keras.models.load_model("../feature_extractors/{}.h5".format(name)) model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
os.makedirs(model_dir, exist_ok=True)
model_file = os.path.join(model_dir, "{}.h5".format(name))
if os.path.exists(model_file):
model = keras.models.load_model(model_file)
else:
get_file("{}.h5".format(name), "http://sandlab.cs.uchicago.edu/fawkes/files/{}.h5".format(name),
cache_dir=model_dir, cache_subdir='')
get_file("{}_emb.p.gz".format(name), "http://sandlab.cs.uchicago.edu/fawkes/files/{}_emb.p.gz".format(name),
cache_dir=model_dir, cache_subdir='')
model = keras.models.load_model(model_file)
if hasattr(model.layers[-1], "activation") and model.layers[-1].activation == "softmax": if hasattr(model.layers[-1], "activation") and model.layers[-1].activation == "softmax":
raise Exception( raise Exception(
"Given extractor's last layer is softmax, need to remove the top layers to make it into a feature extractor") "Given extractor's last layer is softmax, need to remove the top layers to make it into a feature extractor")
@ -199,11 +300,13 @@ def load_extractor(name):
return model return model
def get_dataset_path(dataset): def get_dataset_path(dataset):
if not os.path.exists("config.json"): model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
if not os.path.exists(os.path.join(model_dir, "config.json")):
raise Exception("Please config the datasets before running protection code. See more in README and config.py.") raise Exception("Please config the datasets before running protection code. See more in README and config.py.")
config = json.load(open("config.json", 'r')) config = json.load(open(os.path.join(model_dir, "config.json"), 'r'))
if dataset not in config: if dataset not in config:
raise Exception( raise Exception(
"Dataset {} does not exist, please download to data/ and add the path to this function... Abort".format( "Dataset {} does not exist, please download to data/ and add the path to this function... Abort".format(
@ -217,7 +320,8 @@ def normalize(x):
def dump_image(x, filename, format="png", scale=False): def dump_image(x, filename, format="png", scale=False):
img = image.array_to_img(x, scale=scale) # img = image.array_to_img(x, scale=scale)
img = image.array_to_img(x)
img.save(filename, format) img.save(filename, format)
return return
@ -235,9 +339,13 @@ def load_dir(path):
def load_embeddings(feature_extractors_names): def load_embeddings(feature_extractors_names):
model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
dictionaries = [] dictionaries = []
for extractor_name in feature_extractors_names: for extractor_name in feature_extractors_names:
path2emb = pickle.load(open("../feature_extractors/embeddings/{}_emb_norm.p".format(extractor_name), "rb")) fp = gzip.open(os.path.join(model_dir, "{}_emb.p.gz".format(extractor_name)), 'rb')
path2emb = pickle.load(fp)
fp.close()
dictionaries.append(path2emb) dictionaries.append(path2emb)
merge_dict = {} merge_dict = {}
@ -272,6 +380,8 @@ def calculate_dist_score(a, b, feature_extractors_ls, metric='l2'):
def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, metric='l2'): def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, metric='l2'):
model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
original_feature_x = extractor_ls_predict(feature_extractors_ls, imgs) original_feature_x = extractor_ls_predict(feature_extractors_ls, imgs)
path2emb = load_embeddings(feature_extractors_names) path2emb = load_embeddings(feature_extractors_names)
@ -282,37 +392,25 @@ def select_target_label(imgs, feature_extractors_ls, feature_extractors_names, m
    pair_dist = pairwise_distances(original_feature_x, embs, metric)
    max_sum = np.min(pair_dist, axis=0)
    max_id = np.argmax(max_sum)

    image_paths = glob.glob(os.path.join(model_dir, "target_data/{}/*".format(paths[int(max_id)])))
    target_images = [image.img_to_array(image.load_img(cur_path)) for cur_path in
                     image_paths]
    target_images = preprocess_input(np.array([resize(x, (224, 224)) for x in target_images]))

    target_images = list(target_images)
    while len(target_images) < len(imgs):
        target_images += target_images

    target_images = random.sample(target_images, len(imgs))
    return np.array(target_images)
class CloakData(object):
    def __init__(self, protect_directory=None, img_shape=(224, 224)):
        self.img_shape = img_shape
        # self.train_data_dir, self.test_data_dir, self.number_classes, self.number_samples = get_dataset_path(dataset)
        # self.all_labels = sorted(list(os.listdir(self.train_data_dir)))
        self.protect_directory = protect_directory

View File

@ -52,6 +52,7 @@ We shared three different feature extractors under feature_extractors/
1. low_extract.h5: trained on WebFace dataset with DenseNet architecture.
2. mid_extract.h5: VGGFace2 dataset with DenseNet architecture. Trained with PGD adversarial training for 5 epochs.
3. high_extract.h5: WebFace dataset with DenseNet architecture. Trained with PGD adversarial training for 20 epochs.
4. high2_extract.h5: VGGFace2 dataset with DenseNet architecture. Trained with PGD adversarial training for 20 epochs.

### Citation
```

399
fawkes_dev/azure.py Normal file
View File

@ -0,0 +1,399 @@
import http.client, urllib.request, urllib.parse, urllib.error
import json
import time
# Face API Key and Endpoint
subscription_key = 'e127e26e4d534e2bad6fd9ca06145302'
uri_base = 'eastus.api.cognitive.microsoft.com'
# uri_base = 'https://shawn.cognitiveservices.azure.com/'
def detect_face(image_url):
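# POST the image URL to the Face API /detect endpoint and return the
# transient faceId of the first face found in the image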
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
# Request parameters
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'recognitionModel': 'recognition_01',
'returnRecognitionModel': 'false',
'detectionModel': 'detection_01',
})
body = json.dumps({
'url': image_url
})
conn = http.client.HTTPSConnection(uri_base)
conn.request("POST", "/face/v1.0/detect?%s" % params, body, headers)
response = conn.getresponse()
data = json.loads(response.read())
conn.close()
return data[0]["faceId"]
def verify_face(faceId, personGroupId, personId):
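# call the /verify endpoint to check whether the detected faceId matches the
# given person in the given person group; returns the parsed JSON response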
# html header
headers = {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
# image URL
body = json.dumps({
"faceId": faceId,
"personId": personId,
"PersonGroupId": personGroupId
})
# Call Face API
conn = http.client.HTTPSConnection(uri_base)
conn.request("POST", "/face/v1.0/verify?%s" % params, body, headers)
response = conn.getresponse()
data = json.loads(response.read())
conn.close()
return data
def create_personGroupId(personGroupId, personGroupName):
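# create (PUT) a new person group that persons and their faces can be added to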
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({
"name": personGroupName
})
conn = http.client.HTTPSConnection(uri_base)
conn.request("PUT", "/face/v1.0/persongroups/{}?%s".format(personGroupId) % params, body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close()
def create_personId(personGroupId, personName):
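# create a person inside the group and return the personId assigned by the API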
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({
"name": personName
})
conn = http.client.HTTPSConnection(uri_base)
conn.request("POST", "/face/v1.0/persongroups/{}/persons?%s".format(personGroupId) % params, body, headers)
response = conn.getresponse()
data = json.loads(response.read())
print(data)
conn.close()
return data["personId"]
def add_persistedFaceId(personGroupId, personId, image_url):
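# register one face image URL under an existing person; the API returns a
# persistedFaceId that persists across sessions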
headers = {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
'personGroupId': personGroupId,
'personId': personId
})
body = json.dumps({
'url': image_url
})
conn = http.client.HTTPSConnection(uri_base)
conn.request("POST", "/face/v1.0/persongroups/{}/persons/{}/persistedFaces?%s".format(personGroupId, personId) % params, body, headers)
response = conn.getresponse()
data = json.loads(response.read())
print(data)
conn.close()
return data["persistedFaceId"]
def list_personGroupPerson(personGroupId):
headers = {
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({})
conn = http.client.HTTPSConnection(uri_base)
conn.request("GET", "/face/v1.0/persongroups/{}/persons?%s".format(personGroupId) % params, body, headers)
response = conn.getresponse()
data = json.loads(response.read())
conn.close()
for person in data:
print(person["personId"], len(person["persistedFaceIds"]))
def get_personGroupPerson(personGroupId, personId):
headers = {
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({})
conn = http.client.HTTPSConnection(uri_base)
conn.request("GET", "/face/v1.0/persongroups/{}/persons/{}?%s".format(personGroupId, personId) % params, body, headers)
response = conn.getresponse()
data = json.loads(response.read())
print(data)
conn.close()
def train_personGroup(personGroupId):
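# kick off asynchronous training of the person group's recognition model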
headers = {
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({})
conn = http.client.HTTPSConnection(uri_base)
conn.request("POST", "/face/v1.0/persongroups/{}/train?%s".format(personGroupId) % params, body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close()
def eval(original_faceIds, personGroupId, protect_personId):
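# identify the given faceIds against the trained person group; returns True if
# the top candidate for the first face is the protected person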
headers = {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({
'faceIds': original_faceIds,
'personGroupId': personGroupId,
'maxNumOfCandidatesReturned': 1
})
conn = http.client.HTTPSConnection(uri_base)
conn.request("POST", "/face/v1.0/identify?%s" % params, body, headers)
response = conn.getresponse()
data = json.loads(response.read())
conn.close()
face = data[0]
if len(face["candidates"]) and face["candidates"][0]["personId"] == protect_personId:
return True
else:
return False
def delete_personGroupPerson(personGroupId, personId):
headers = {
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({})
conn = http.client.HTTPSConnection(uri_base)
conn.request("DELETE", "/face/v1.0/persongroups/{}/persons/{}?%s".format(personGroupId, personId) % params, body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close()
def add_protect_person(personGroupId, name):
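# enroll the protected user into the group using their cloaked images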
personId = create_personId(personGroupId, name)
for idx in range(72):
cloaked_image_url = "https://super.cs.uchicago.edu/~shawn/cloaked/{}_c.png".format(idx)
add_persistedFaceId(personGroupId, personId, cloaked_image_url)
def add_sybil_person(personGroupId, name):
personId = create_personId(personGroupId, name)
for idx in range(82):
try:
cloaked_image_url = "https://super.cs.uchicago.edu/~shawn/sybils/{}_c.png".format(idx)
add_persistedFaceId(personGroupId, personId, cloaked_image_url)
except:
print(idx)
def add_other_person(personGroupId):
for idx_person in range(65):
personId = create_personId(personGroupId, str(idx_person))
for idx_image in range(90):
try:
image_url = "https://super.cs.uchicago.edu/~shawn/train/{}/{}.png".format(idx_person, idx_image)
add_persistedFaceId(personGroupId, personId, image_url)
except:
print(idx_person, idx_image)
def get_trainStatus(personGroupId):
headers = {
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({})
conn = http.client.HTTPSConnection(uri_base)
conn.request("GET", "/face/v1.0/persongroups/{}/training?%s".format(personGroupId) % params, body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close()
def test_original():
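# end-to-end check: identify uncloaked images (indices 50-81) against the
# 'pubfig' group and print the fraction matched to the protected person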
personGroupId = 'pubfig'
# create_personGroupId(personGroupId, 'pubfig')
# add protect person
protect_personId = 'd3df3012-6f3f-4c1b-b86d-55e91a352e01'
#protect_personId = create_personId(personGroupId, 'Emily')
#for idx in range(50):
# image_url = "https://super.cs.uchicago.edu/~shawn/cloaked/{}_o.png".format(idx)
# add_persistedFaceId(personGroupId, protect_personId, image_url)
# add other people
#for idx_person in range(65):
# personId = create_personId(personGroupId, str(idx_person))
# for idx_image in range(50):
# try:
# image_url = "https://super.cs.uchicago.edu/~shawn/train/{}/{}.png".format(idx_person, idx_image)
# add_persistedFaceId(personGroupId, personId, image_url)
# except:
# print(idx_person, idx_image)
# train model based on personGroup
#train_personGroup(personGroupId)
#time.sleep(3)
#get_trainStatus(personGroupId)
#list_personGroupPerson(personGroupId)
idx_range = range(50, 82)
acc = 0.
for idx in idx_range:
original_image_url = "https://super.cs.uchicago.edu/~shawn/cloaked/{}_o.png".format(idx)
faceId = detect_face(original_image_url)
original_faceIds = [faceId]
# verify
res = eval(original_faceIds, personGroupId, protect_personId)
if res:
acc += 1.
acc /= len(idx_range)
print(acc) # 1.0
def list_personGroups():
headers = {
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({})
conn = http.client.HTTPSConnection(uri_base)
conn.request("GET", "/face/v1.0/persongroups?%s" % params, body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close()
def delete_personGroup(personGroupId):
headers = {
'Ocp-Apim-Subscription-Key': subscription_key,
}
params = urllib.parse.urlencode({
})
body = json.dumps({})
conn = http.client.HTTPSConnection(uri_base)
conn.request("DELETE", "/face/v1.0/persongroups/{}?%s".format(personGroupId) % params, body, headers)
response = conn.getresponse()
data = response.read()
print(data)
conn.close()
def main():
# delete_personGroup('cloaking')
# delete_personGroup('cloaking-emily')
# delete_personGroup('pubfig')
# list_personGroups()
# exit()
personGroupId = 'cloaking'
# create_personGroupId(personGroupId, 'cloaking')
list_personGroups()
exit()
#delete_personGroupPerson(personGroupId, '0ac606cd-24b3-440f-866a-31adf2a1b446')
#add_protect_person(personGroupId, 'Emily')
#personId = create_personId(personGroupId, 'Emily')
#add_sybil_person(personGroupId, 'sybil')
protect_personId = '6c5a71eb-f39a-4570-b3f5-72cca3ab5a6b'
#delete_personGroupPerson(personGroupId, protect_personId)
#add_protect_person(personGroupId, 'Emily')
# train model based on personGroup
#train_personGroup(personGroupId)
get_trainStatus(personGroupId)
#add_other_person(personGroupId)
#list_personGroupPerson(personGroupId)
#delete_personGroupPerson(personGroupId, '80e32c80-bc69-416a-9dff-c8d42d7a3301')
idx_range = range(72, 82)
original_faceIds = []
for idx in idx_range:
original_image_url = "https://super.cs.uchicago.edu/~shawn/cloaked/{}_o.png".format(idx)
faceId = detect_face(original_image_url)
original_faceIds.append(faceId)
# verify
eval(original_faceIds, personGroupId, protect_personId)
if __name__ == '__main__':
main()

View File

@ -4,7 +4,7 @@ import os
DATASETS = {
    "pubfig": "../data/pubfig",
    "scrub": "/home/shansixioing/fawkes/data/scrub/",
    "vggface2": "/mnt/data/sixiongshan/data/vggface2/",
    "webface": "/mnt/data/sixiongshan/data/webface/",
    "youtubeface": "/mnt/data/sixiongshan/data/youtubeface/keras_flow_data/",
@ -32,7 +32,8 @@ def main():
"num_images": num_images} "num_images": num_images}
print("Successfully config {}".format(dataset)) print("Successfully config {}".format(dataset))
j = json.dumps(config) j = json.dumps(config)
with open("config.json", "wb") as f: model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
with open(os.path.join(model_dir, "config.json"), "wb") as f:
f.write(j.encode()) f.write(j.encode())

View File

@ -1,430 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-05-17
# @Author : Shawn Shan (shansixiong@cs.uchicago.edu)
# @Link : https://www.shawnshan.com/
import datetime
import time
from decimal import Decimal
import numpy as np
import tensorflow as tf
from utils import preprocess, reverse_preprocess
class FawkesMaskGeneration:
# if the attack is trying to mimic a target image or a neuron vector
MIMIC_IMG = True
# number of iterations to perform gradient descent
MAX_ITERATIONS = 10000
# larger values converge faster to less accurate results
LEARNING_RATE = 1e-2
# the initial constant c to pick as a first guess
INITIAL_CONST = 1
# pixel intensity range
INTENSITY_RANGE = 'imagenet'
# threshold for distance
L_THRESHOLD = 0.03
# whether keep the final result or the best result
KEEP_FINAL = False
# max_val of image
MAX_VAL = 255
# The following variables are used by DSSIM, should keep as default
# filter size in SSIM
FILTER_SIZE = 11
# filter sigma in SSIM
FILTER_SIGMA = 1.5
# weights used in MS-SSIM
SCALE_WEIGHTS = None
MAXIMIZE = False
IMAGE_SHAPE = (224, 224, 3)
RATIO = 1.0
LIMIT_DIST = False
def __init__(self, sess, bottleneck_model_ls, mimic_img=MIMIC_IMG,
batch_size=1, learning_rate=LEARNING_RATE,
max_iterations=MAX_ITERATIONS, initial_const=INITIAL_CONST,
intensity_range=INTENSITY_RANGE, l_threshold=L_THRESHOLD,
max_val=MAX_VAL, keep_final=KEEP_FINAL, maximize=MAXIMIZE, image_shape=IMAGE_SHAPE,
verbose=0, ratio=RATIO, limit_dist=LIMIT_DIST):
assert intensity_range in {'raw', 'imagenet', 'inception', 'mnist'}
# constant used for tanh transformation to avoid corner cases
self.tanh_constant = 2 - 1e-6
self.sess = sess
self.MIMIC_IMG = mimic_img
self.LEARNING_RATE = learning_rate
self.MAX_ITERATIONS = max_iterations
self.initial_const = initial_const
self.batch_size = batch_size
self.intensity_range = intensity_range
self.l_threshold = l_threshold
self.max_val = max_val
self.keep_final = keep_final
self.verbose = verbose
self.maximize = maximize
self.learning_rate = learning_rate
self.ratio = ratio
self.limit_dist = limit_dist
self.single_shape = list(image_shape)
self.input_shape = tuple([self.batch_size] + self.single_shape)
self.bottleneck_shape = tuple([self.batch_size] + self.single_shape)
# the variable we're going to optimize over
self.modifier = tf.Variable(np.zeros(self.input_shape, dtype=np.float32))
# target image in tanh space
if self.MIMIC_IMG:
self.timg_tanh = tf.Variable(np.zeros(self.input_shape), dtype=np.float32)
else:
self.bottleneck_t_raw = tf.Variable(np.zeros(self.bottleneck_shape), dtype=np.float32)
# source image in tanh space
self.simg_tanh = tf.Variable(np.zeros(self.input_shape), dtype=np.float32)
self.const = tf.Variable(np.ones(batch_size), dtype=np.float32)
self.mask = tf.Variable(np.ones((batch_size), dtype=np.bool))
self.weights = tf.Variable(np.ones(self.bottleneck_shape,
dtype=np.float32))
# and here's what we use to assign them
self.assign_modifier = tf.placeholder(tf.float32, self.input_shape)
if self.MIMIC_IMG:
self.assign_timg_tanh = tf.placeholder(
tf.float32, self.input_shape)
else:
self.assign_bottleneck_t_raw = tf.placeholder(
tf.float32, self.bottleneck_shape)
self.assign_simg_tanh = tf.placeholder(tf.float32, self.input_shape)
self.assign_const = tf.placeholder(tf.float32, (batch_size))
self.assign_mask = tf.placeholder(tf.bool, (batch_size))
self.assign_weights = tf.placeholder(tf.float32, self.bottleneck_shape)
# the resulting image, tanh'd to keep bounded from -0.5 to 0.5
# adversarial image in raw space
self.aimg_raw = (tf.tanh(self.modifier + self.simg_tanh) /
self.tanh_constant +
0.5) * 255.0
# source image in raw space
self.simg_raw = (tf.tanh(self.simg_tanh) /
self.tanh_constant +
0.5) * 255.0
if self.MIMIC_IMG:
# target image in raw space
self.timg_raw = (tf.tanh(self.timg_tanh) /
self.tanh_constant +
0.5) * 255.0
# convert source and adversarial image into input space
if self.intensity_range == 'imagenet':
mean = tf.constant(np.repeat([[[[103.939, 116.779, 123.68]]]], self.batch_size, axis=0), dtype=tf.float32,
name='img_mean')
self.aimg_input = (self.aimg_raw[..., ::-1] - mean)
self.simg_input = (self.simg_raw[..., ::-1] - mean)
if self.MIMIC_IMG:
self.timg_input = (self.timg_raw[..., ::-1] - mean)
elif self.intensity_range == 'raw':
self.aimg_input = self.aimg_raw
self.simg_input = self.simg_raw
if self.MIMIC_IMG:
self.timg_input = self.timg_raw
def batch_gen_DSSIM(aimg_raw_split, simg_raw_split):
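# DSSIM distance: (1 - SSIM) / 2, computed per image pair in the batch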
msssim_split = tf.image.ssim(aimg_raw_split, simg_raw_split, max_val=255.0)
dist = (1.0 - tf.stack(msssim_split)) / 2.0
return dist
# raw value of DSSIM distance
self.dist_raw = batch_gen_DSSIM(self.aimg_raw, self.simg_raw)
# distance value after applying threshold
self.dist = tf.maximum(self.dist_raw - self.l_threshold, 0.0)
self.dist_raw_sum = tf.reduce_sum(
tf.where(self.mask,
self.dist_raw,
tf.zeros_like(self.dist_raw)))
self.dist_sum = tf.reduce_sum(tf.where(self.mask, self.dist, tf.zeros_like(self.dist)))
def resize_tensor(input_tensor, model_input_shape):
if input_tensor.shape[1:] == model_input_shape or model_input_shape[1] is None:
return input_tensor
resized_tensor = tf.image.resize(input_tensor, model_input_shape[:2])
return resized_tensor
def calculate_direction(bottleneck_model, cur_timg_input, cur_simg_input):
target_features = bottleneck_model(cur_timg_input)
return target_features
# target_center = tf.reduce_mean(target_features, axis=0)
# original = bottleneck_model(cur_simg_input)
# original_center = tf.reduce_mean(original, axis=0)
# direction = target_center - original_center
# final_target = original + self.ratio * direction
# return final_target
self.bottlesim = 0.0
self.bottlesim_sum = 0.0
self.bottlesim_push = 0.0
for bottleneck_model in bottleneck_model_ls:
model_input_shape = bottleneck_model.input_shape[1:]
cur_aimg_input = resize_tensor(self.aimg_input, model_input_shape)
self.bottleneck_a = bottleneck_model(cur_aimg_input)
if self.MIMIC_IMG:
# cur_timg_input = resize_tensor(self.timg_input, model_input_shape)
# cur_simg_input = resize_tensor(self.simg_input, model_input_shape)
cur_timg_input = self.timg_input
cur_simg_input = self.simg_input
self.bottleneck_t = calculate_direction(bottleneck_model, cur_timg_input, cur_simg_input)
# self.bottleneck_t = bottleneck_model(cur_timg_input)
else:
self.bottleneck_t = self.bottleneck_t_raw
bottleneck_diff = self.bottleneck_t - self.bottleneck_a
scale_factor = tf.sqrt(tf.reduce_sum(tf.square(self.bottleneck_t), axis=1))
cur_bottlesim = tf.sqrt(tf.reduce_sum(tf.square(bottleneck_diff), axis=1))
cur_bottlesim = cur_bottlesim / scale_factor
cur_bottlesim_sum = tf.reduce_sum(cur_bottlesim)
self.bottlesim += cur_bottlesim
# self.bottlesim_push += cur_bottlesim_push_sum
self.bottlesim_sum += cur_bottlesim_sum
# sum up the losses
if self.maximize:
self.loss = self.const * tf.square(self.dist) - self.bottlesim
else:
self.loss = self.const * tf.square(self.dist) + self.bottlesim
self.loss_sum = tf.reduce_sum(tf.where(self.mask, self.loss, tf.zeros_like(self.loss)))
# Setup the Adadelta optimizer and keep track of variables
# we're creating
start_vars = set(x.name for x in tf.global_variables())
self.learning_rate_holder = tf.placeholder(tf.float32, shape=[])
optimizer = tf.train.AdadeltaOptimizer(self.learning_rate_holder)
self.train = optimizer.minimize(self.loss_sum,
var_list=[self.modifier])
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if x.name not in start_vars]
# these are the variables to initialize when we run
self.setup = []
self.setup.append(self.modifier.assign(self.assign_modifier))
if self.MIMIC_IMG:
self.setup.append(self.timg_tanh.assign(self.assign_timg_tanh))
else:
self.setup.append(self.bottleneck_t_raw.assign(
self.assign_bottleneck_t_raw))
self.setup.append(self.simg_tanh.assign(self.assign_simg_tanh))
self.setup.append(self.const.assign(self.assign_const))
self.setup.append(self.mask.assign(self.assign_mask))
self.setup.append(self.weights.assign(self.assign_weights))
self.init = tf.variables_initializer(var_list=[self.modifier] + new_vars)
print('Attacker loaded')
def preprocess_arctanh(self, imgs):
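# map pixel values into arctanh space so the optimizer can search an
# unconstrained variable while the decoded image stays in a valid range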
imgs = reverse_preprocess(imgs, self.intensity_range)
imgs /= 255.0
imgs -= 0.5
imgs *= self.tanh_constant
tanh_imgs = np.arctanh(imgs)
return tanh_imgs
def clipping(self, imgs):
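# undo preprocessing, clip and round to valid pixel values, then re-apply
# preprocessing so the perturbation survives pixel quantization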
imgs = reverse_preprocess(imgs, self.intensity_range)
imgs = np.clip(imgs, 0, self.max_val)
imgs = np.rint(imgs)
imgs = preprocess(imgs, self.intensity_range)
return imgs
def attack(self, source_imgs, target_imgs, weights=None):
if weights is None:
weights = np.ones([source_imgs.shape[0]] +
list(self.bottleneck_shape[1:]))
assert weights.shape[1:] == self.bottleneck_shape[1:]
assert source_imgs.shape[1:] == self.input_shape[1:]
assert source_imgs.shape[0] == weights.shape[0]
if self.MIMIC_IMG:
assert target_imgs.shape[1:] == self.input_shape[1:]
assert source_imgs.shape[0] == target_imgs.shape[0]
else:
assert target_imgs.shape[1:] == self.bottleneck_shape[1:]
assert source_imgs.shape[0] == target_imgs.shape[0]
start_time = time.time()
adv_imgs = []
print('%d batches in total'
% int(np.ceil(len(source_imgs) / self.batch_size)))
for idx in range(0, len(source_imgs), self.batch_size):
print('processing batch %d at %s' % (idx, datetime.datetime.now()))
adv_img = self.attack_batch(source_imgs[idx:idx + self.batch_size],
target_imgs[idx:idx + self.batch_size],
weights[idx:idx + self.batch_size])
adv_imgs.extend(adv_img)
elapsed_time = time.time() - start_time
print('attack cost %f s' % (elapsed_time))
return np.array(adv_imgs)
def attack_batch(self, source_imgs, target_imgs, weights):
"""
Run the attack on a batch of images and labels.
"""
LR = self.learning_rate
nb_imgs = source_imgs.shape[0]
mask = [True] * nb_imgs + [False] * (self.batch_size - nb_imgs)
mask = np.array(mask, dtype=np.bool)
source_imgs = np.array(source_imgs)
target_imgs = np.array(target_imgs)
# convert to tanh-space
simg_tanh = self.preprocess_arctanh(source_imgs)
if self.MIMIC_IMG:
timg_tanh = self.preprocess_arctanh(target_imgs)
else:
timg_tanh = target_imgs
CONST = np.ones(self.batch_size) * self.initial_const
self.sess.run(self.init)
simg_tanh_batch = np.zeros(self.input_shape)
if self.MIMIC_IMG:
timg_tanh_batch = np.zeros(self.input_shape)
else:
timg_tanh_batch = np.zeros(self.bottleneck_shape)
weights_batch = np.zeros(self.bottleneck_shape)
simg_tanh_batch[:nb_imgs] = simg_tanh[:nb_imgs]
timg_tanh_batch[:nb_imgs] = timg_tanh[:nb_imgs]
weights_batch[:nb_imgs] = weights[:nb_imgs]
modifier_batch = np.ones(self.input_shape) * 1e-6
# set the variables so that we don't have to send them over again
if self.MIMIC_IMG:
self.sess.run(self.setup,
{self.assign_timg_tanh: timg_tanh_batch,
self.assign_simg_tanh: simg_tanh_batch,
self.assign_const: CONST,
self.assign_mask: mask,
self.assign_weights: weights_batch,
self.assign_modifier: modifier_batch})
else:
# if directly mimicking a vector, use assign_bottleneck_t_raw
# in setup
self.sess.run(self.setup,
{self.assign_bottleneck_t_raw: timg_tanh_batch,
self.assign_simg_tanh: simg_tanh_batch,
self.assign_const: CONST,
self.assign_mask: mask,
self.assign_weights: weights_batch,
self.assign_modifier: modifier_batch})
best_bottlesim = [0] * nb_imgs if self.maximize else [np.inf] * nb_imgs
best_adv = np.zeros_like(source_imgs)
if self.verbose == 1:
loss_sum = float(self.sess.run(self.loss_sum))
dist_sum = float(self.sess.run(self.dist_sum))
thresh_over = (dist_sum / self.batch_size / self.l_threshold * 100)
dist_raw_sum = float(self.sess.run(self.dist_raw_sum))
bottlesim_sum = self.sess.run(self.bottlesim_sum)
print('START: Total loss: %.4E; perturb: %.6f (%.2f%% over, raw: %.6f); sim: %f'
% (Decimal(loss_sum),
dist_sum,
thresh_over,
dist_raw_sum,
bottlesim_sum / nb_imgs))
try:
total_distance = [0] * nb_imgs
if self.limit_dist:
dist_raw_list, bottlesim_list, aimg_input_list = self.sess.run(
[self.dist_raw,
self.bottlesim,
self.aimg_input])
for e, (dist_raw, bottlesim, aimg_input) in enumerate(
zip(dist_raw_list, bottlesim_list, aimg_input_list)):
if e >= nb_imgs:
break
total_distance[e] = bottlesim
for iteration in range(self.MAX_ITERATIONS):
self.sess.run([self.train], feed_dict={self.learning_rate_holder: LR})
dist_raw_list, bottlesim_list, aimg_input_list = self.sess.run(
[self.dist_raw,
self.bottlesim,
self.aimg_input])
for e, (dist_raw, bottlesim, aimg_input) in enumerate(
zip(dist_raw_list, bottlesim_list, aimg_input_list)):
if e >= nb_imgs:
break
if (bottlesim < best_bottlesim[e] and bottlesim > total_distance[e] * 0.1 and (
not self.maximize)) or (
bottlesim > best_bottlesim[e] and self.maximize):
best_bottlesim[e] = bottlesim
best_adv[e] = aimg_input
if iteration != 0 and iteration % (self.MAX_ITERATIONS // 3) == 0:
LR = LR / 2
print("Learning Rate: ", LR)
if iteration % (self.MAX_ITERATIONS // 10) == 0:
if self.verbose == 1:
loss_sum = float(self.sess.run(self.loss_sum))
dist_sum = float(self.sess.run(self.dist_sum))
thresh_over = (dist_sum /
self.batch_size /
self.l_threshold *
100)
dist_raw_sum = float(self.sess.run(self.dist_raw_sum))
bottlesim_sum = self.sess.run(self.bottlesim_sum)
print('ITER %4d: Total loss: %.4E; perturb: %.6f (%.2f%% over, raw: %.6f); sim: %f'
% (iteration,
Decimal(loss_sum),
dist_sum,
thresh_over,
dist_raw_sum,
bottlesim_sum / nb_imgs))
except KeyboardInterrupt:
pass
if self.verbose == 1:
loss_sum = float(self.sess.run(self.loss_sum))
dist_sum = float(self.sess.run(self.dist_sum))
thresh_over = (dist_sum / self.batch_size / self.l_threshold * 100)
dist_raw_sum = float(self.sess.run(self.dist_raw_sum))
bottlesim_sum = float(self.sess.run(self.bottlesim_sum))
print('END: Total loss: %.4E; perturb: %.6f (%.2f%% over, raw: %.6f); sim: %f'
% (Decimal(loss_sum),
dist_sum,
thresh_over,
dist_raw_sum,
bottlesim_sum / nb_imgs))
best_adv = self.clipping(best_adv[:nb_imgs])
return best_adv

View File

@ -1,25 +1,18 @@
import sys
import argparse
import os
import numpy as np
sys.path.append("/home/shansixioing/fawkes/fawkes")
from utils import extract_faces, get_dataset_path, init_gpu, load_extractor, load_victim_model
import random
import glob
from keras.preprocessing import image
from keras.utils import to_categorical
from keras.applications.vgg16 import preprocess_input
def select_samples(data_dir):
    all_data_path = []
@ -27,43 +20,49 @@ def select_samples(data_dir):
        cls_dir = os.path.join(data_dir, cls)
        for data_path in os.listdir(cls_dir):
            all_data_path.append(os.path.join(cls_dir, data_path))

    return all_data_path
def generator_wrap(protect_images, test=False, validation_split=0.1):
    train_data_dir, test_data_dir, num_classes, num_images = get_dataset_path(args.dataset)

    idx = 0
    path2class = {}
    path2imgs_list = {}

    for target_path in sorted(glob.glob(train_data_dir + "/*")):
        path2class[target_path] = idx
        path2imgs_list[target_path] = glob.glob(os.path.join(target_path, "*"))
        idx += 1
        if idx >= args.num_classes:
            break

    path2class["protected"] = idx

    np.random.seed(12345)
    while True:
        batch_X = []
        batch_Y = []
        cur_batch_path = np.random.choice(list(path2class.keys()), args.batch_size)
        for p in cur_batch_path:
            cur_y = path2class[p]
            if test and p == 'protected':
                continue
            # protect class images in train dataset
            elif p == 'protected':
                cur_x = random.choice(protect_images)
            else:
                cur_path = random.choice(path2imgs_list[p])
                im = image.load_img(cur_path, target_size=(224, 224))
                cur_x = image.img_to_array(im)
                cur_x = preprocess_input(cur_x)
            batch_X.append(cur_x)
            batch_Y.append(cur_y)
        batch_X = np.array(batch_X)
        batch_Y = to_categorical(np.array(batch_Y), num_classes=args.num_classes + 1)
        yield batch_X, batch_Y
@ -87,51 +86,59 @@ def eval_cloaked_test_data(cloak_data, n_classes, validation_split=0.1):
def main():
    init_gpu(args.gpu)
    # if args.dataset == 'pubfig':
    #     N_CLASSES = 65
    #     CLOAK_DIR = args.cloak_data
    # elif args.dataset == 'scrub':
    #     N_CLASSES = 530
    #     CLOAK_DIR = args.cloak_data
    # else:
    #     raise ValueError

    print("Build attacker's model")
    image_paths = glob.glob(os.path.join(args.directory, "*"))
    original_image_paths = sorted([path for path in image_paths if "_cloaked" not in path.split("/")[-1]])
    protect_image_paths = sorted([path for path in image_paths if "_cloaked" in path.split("/")[-1]])

    original_imgs = np.array([extract_faces(image.img_to_array(image.load_img(cur_path))) for cur_path in
                              original_image_paths[:150]])
    original_y = to_categorical([args.num_classes] * len(original_imgs), num_classes=args.num_classes + 1)

    protect_imgs = [extract_faces(image.img_to_array(image.load_img(cur_path))) for cur_path in
                    protect_image_paths]

    train_generator = generator_wrap(protect_imgs,
                                     validation_split=args.validation_split)
    test_generator = generator_wrap(protect_imgs, test=True,
                                    validation_split=args.validation_split)

    base_model = load_extractor(args.transfer_model)
    model = load_victim_model(teacher_model=base_model, number_classes=args.num_classes + 1)

    # cloaked_test_X, cloaked_test_Y = eval_cloaked_test_data(cloak_data, args.num_classes,
    #                                                         validation_split=args.validation_split)

    # try:
    train_data_dir, test_data_dir, num_classes, num_images = get_dataset_path(args.dataset)
    model.fit_generator(train_generator, steps_per_epoch=num_images // 32,
                        validation_data=(original_imgs, original_y),
                        epochs=args.n_epochs,
                        verbose=1,
                        use_multiprocessing=True, workers=5)
    # except KeyboardInterrupt:
    #     pass

    _, acc_original = model.evaluate(original_imgs, original_y, verbose=0)
    print("Accuracy on uncloaked/original images TEST: {:.4f}".format(acc_original))
    # EVAL_RES['acc_original'] = acc_original

    _, other_acc = model.evaluate_generator(test_generator, verbose=0, steps=50)
    print("Accuracy on other classes {:.4f}".format(other_acc))
    # EVAL_RES['other_acc'] = other_acc
    # dump_dictionary_as_json(EVAL_RES, os.path.join(CLOAK_DIR, "eval_seed{}.json".format(args.seed_idx)))
def parse_arguments(argv):
@ -139,16 +146,21 @@ def parse_arguments(argv):
    parser.add_argument('--gpu', type=str,
                        help='GPU id', default='0')
    parser.add_argument('--dataset', type=str,
                        help='name of dataset', default='scrub')
    parser.add_argument('--num_classes', type=int,
                        help='name of dataset', default=520)
    parser.add_argument('--directory', '-d', type=str,
                        help='name of the cloak result directory',
                        default='img/')
    parser.add_argument('--transfer_model', type=str,
                        help='the feature extractor used for tracker model training. ', default='low_extract')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--validation_split', type=float, default=0.1)
    parser.add_argument('--n_epochs', type=int, default=3)
    return parser.parse_args(argv)

View File

@ -1,4 +1,5 @@
import argparse
import glob
import os
import pickle
import random
@ -7,8 +8,9 @@ import sys
import numpy as np
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
sys.path.append("../fawkes")
# from utils import load_extractor
import keras

def load_sample_dir(path, sample=10):
    x_ls = []
@ -30,21 +32,26 @@ def normalize(x):
def main():
    extractor = keras.models.load_model(args.feature_extractor)
    path2emb = {}
    model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
    for path in glob.glob(os.path.join(model_dir, "target_data/*")):
        print(path)
        idx = int(path.split("/")[-1])
        cur_image_paths = glob.glob(os.path.join(path, "*"))
        imgs = np.array([image.img_to_array(image.load_img(p, target_size=(224, 224))) for p in cur_image_paths])
        imgs = preprocess_input(imgs)

        cur_feature = extractor.predict(imgs)
        cur_feature = np.mean(cur_feature, axis=0)
        path2emb[idx] = cur_feature

    model_path = os.path.join(model_dir, "{}_extract.h5".format(args.feature_extractor_name))
    emb_path = os.path.join(model_dir, "{}_emb.p".format(args.feature_extractor_name))
    extractor.save(model_path)
    pickle.dump(path2emb, open(emb_path, "wb"))
def parse_arguments(argv):
@ -54,8 +61,12 @@ def parse_arguments(argv):
    parser.add_argument('--candidate-datasets', nargs='+',
                        help='path candidate datasets')
    parser.add_argument('--feature-extractor', type=str,
                        help="path of the feature extractor used for optimization",
                        default="/home/shansixioing/fawkes/feature_extractors/high2_extract.h5")
    parser.add_argument('--feature-extractor-name', type=str,
                        help="name of the feature extractor used for optimization",
                        default="high2")
    return parser.parse_args(argv)

View File

@ -1,95 +0,0 @@
import argparse
import os
import pickle
import random
import sys
import numpy as np
from differentiator import FawkesMaskGeneration
from tensorflow import set_random_seed
from utils import load_extractor, CloakData, init_gpu
random.seed(12243)
np.random.seed(122412)
set_random_seed(12242)
NUM_IMG_PROTECTED = 400 # Number of images used to optimize the target class
BATCH_SIZE = 32
MAX_ITER = 1000
def diff_protected_data(sess, feature_extractors_ls, image_X, number_protect, target_X=None, th=0.01):
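# run the cloak optimization: perturb image_X so its feature-space
# representation moves toward target_X while the DSSIM budget stays below th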
image_X = image_X[:number_protect]
differentiator = FawkesMaskGeneration(sess, feature_extractors_ls,
batch_size=BATCH_SIZE,
mimic_img=True,
intensity_range='imagenet',
initial_const=args.sd,
learning_rate=args.lr,
max_iterations=MAX_ITER,
l_threshold=th,
verbose=1, maximize=False, keep_final=False, image_shape=image_X.shape[1:])
if len(target_X) < len(image_X):
target_X = np.concatenate([target_X, target_X, target_X])
target_X = target_X[:len(image_X)]
cloaked_image_X = differentiator.attack(image_X, target_X)
return cloaked_image_X
def perform_defense():
RES = {}
sess = init_gpu(args.gpu)
FEATURE_EXTRACTORS = [args.feature_extractor]
RES_DIR = '../results/'
RES['num_img_protected'] = NUM_IMG_PROTECTED
RES['extractors'] = FEATURE_EXTRACTORS
num_protect = NUM_IMG_PROTECTED
print("Loading {} for optimization".format(args.feature_extractor))
feature_extractors_ls = [load_extractor(name) for name in FEATURE_EXTRACTORS]
protect_class = args.protect_class
cloak_data = CloakData(args.dataset, protect_class=protect_class)
RES_FILE_NAME = "{}_{}_protect{}".format(args.dataset, args.feature_extractor, cloak_data.protect_class)
RES_FILE_NAME = os.path.join(RES_DIR, RES_FILE_NAME)
print("Protect Class: ", cloak_data.protect_class)
cloak_data.target_path, cloak_data.target_data = cloak_data.select_target_label(feature_extractors_ls,
FEATURE_EXTRACTORS)
os.makedirs(RES_DIR, exist_ok=True)
os.makedirs(RES_FILE_NAME, exist_ok=True)
cloak_image_X = diff_protected_data(sess, feature_extractors_ls, cloak_data.protect_train_X,
number_protect=num_protect,
target_X=cloak_data.target_data, th=args.th)
cloak_data.cloaked_protect_train_X = cloak_image_X
RES['cloak_data'] = cloak_data
pickle.dump(RES, open(os.path.join(RES_FILE_NAME, 'cloak_data.p'), "wb"))
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str,
help='GPU id', default='0')
parser.add_argument('--dataset', type=str,
help='name of dataset', default='scrub')
parser.add_argument('--feature-extractor', type=str,
help="name of the feature extractor used for optimization",
default="webface_dense_robust_extract")
parser.add_argument('--th', type=float, default=0.007)
parser.add_argument('--sd', type=int, default=1e9)
parser.add_argument('--protect_class', type=str, default=None)
parser.add_argument('--lr', type=float, default=1)
return parser.parse_args(argv)
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
perform_defense()

View File

@ -1,373 +0,0 @@
import json
import os
import pickle
import random
import keras
import keras.backend as K
import numpy as np
import tensorflow as tf
from keras.applications.vgg16 import preprocess_input
from keras.layers import Dense, Activation
from keras.models import Model
from keras.preprocessing import image
from keras.utils import to_categorical
from sklearn.metrics import pairwise_distances
# from keras.utils import get_file
def clip_img(X, preprocessing='raw'):
X = reverse_preprocess(X, preprocessing)
X = np.clip(X, 0.0, 255.0)
X = preprocess(X, preprocessing)
return X
def dump_dictionary_as_json(dict, outfile):
j = json.dumps(dict)
with open(outfile, "wb") as f:
f.write(j.encode())
def fix_gpu_memory(mem_fraction=1):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_fraction)
tf_config = tf.ConfigProto(gpu_options=gpu_options)
tf_config.gpu_options.allow_growth = True
tf_config.log_device_placement = False
init_op = tf.global_variables_initializer()
sess = tf.Session(config=tf_config)
sess.run(init_op)
K.set_session(sess)
return sess
def load_victim_model(number_classes, teacher_model=None, end2end=False):
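# stack a new softmax classification head on top of the teacher feature
# extractor; the teacher layers stay frozen unless end2end=True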
for l in teacher_model.layers:
l.trainable = end2end
x = teacher_model.layers[-1].output
x = Dense(number_classes)(x)
x = Activation('softmax', name="act")(x)
model = Model(teacher_model.input, x)
opt = keras.optimizers.Adadelta()
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
def init_gpu(gpu_index, force=False):
if isinstance(gpu_index, list):
gpu_num = ','.join([str(i) for i in gpu_index])
else:
gpu_num = str(gpu_index)
if "CUDA_VISIBLE_DEVICES" in os.environ and os.environ["CUDA_VISIBLE_DEVICES"] and not force:
print('GPU already initiated')
return
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_num
sess = fix_gpu_memory()
return sess
def preprocess(X, method):
assert method in {'raw', 'imagenet', 'inception', 'mnist'}
if method == 'raw':
pass
elif method == 'imagenet':
X = imagenet_preprocessing(X)
else:
raise Exception('unknown method %s' % method)
return X
def reverse_preprocess(X, method):
assert method in {'raw', 'imagenet', 'inception', 'mnist'}
if method == 'raw':
pass
elif method == 'imagenet':
X = imagenet_reverse_preprocessing(X)
else:
raise Exception('unknown method %s' % method)
return X
def imagenet_preprocessing(x, data_format=None):
if data_format is None:
data_format = K.image_data_format()
assert data_format in ('channels_last', 'channels_first')
x = np.array(x)
if data_format == 'channels_first':
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
# Zero-center by mean pixel
if data_format == 'channels_first':
if x.ndim == 3:
x[0, :, :] -= mean[0]
x[1, :, :] -= mean[1]
x[2, :, :] -= mean[2]
if std is not None:
x[0, :, :] /= std[0]
x[1, :, :] /= std[1]
x[2, :, :] /= std[2]
else:
x[:, 0, :, :] -= mean[0]
x[:, 1, :, :] -= mean[1]
x[:, 2, :, :] -= mean[2]
if std is not None:
x[:, 0, :, :] /= std[0]
x[:, 1, :, :] /= std[1]
x[:, 2, :, :] /= std[2]
else:
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
if std is not None:
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
def imagenet_reverse_preprocessing(x, data_format=None):
import keras.backend as K
x = np.array(x)
if data_format is None:
data_format = K.image_data_format()
assert data_format in ('channels_last', 'channels_first')
if data_format == 'channels_first':
if x.ndim == 3:
# Zero-center by mean pixel
x[0, :, :] += 103.939
x[1, :, :] += 116.779
x[2, :, :] += 123.68
# 'BGR'->'RGB'
x = x[::-1, :, :]
else:
x[:, 0, :, :] += 103.939
x[:, 1, :, :] += 116.779
x[:, 2, :, :] += 123.68
x = x[:, ::-1, :, :]
else:
# Zero-center by mean pixel
x[..., 0] += 103.939
x[..., 1] += 116.779
x[..., 2] += 123.68
# 'BGR'->'RGB'
x = x[..., ::-1]
return x
def build_bottleneck_model(model, cut_off):
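# truncate the model at the named layer so it outputs intermediate features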
bottleneck_model = Model(model.input, model.get_layer(cut_off).output)
bottleneck_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return bottleneck_model
def load_extractor(name):
model = keras.models.load_model("../feature_extractors/{}.h5".format(name))
if hasattr(model.layers[-1], "activation") and model.layers[-1].activation == "softmax":
raise Exception(
"Given extractor's last layer is softmax, need to remove the top layers to make it into a feature extractor")
# if "extract" in name.split("/")[-1]:
# pass
# else:
# print("Convert a model to a feature extractor")
# model = build_bottleneck_model(model, model.layers[layer_idx].name)
# model.save(name + "extract")
# model = keras.models.load_model(name + "extract")
return model
def get_dataset_path(dataset):
if not os.path.exists("config.json"):
raise Exception("Please config the datasets before running protection code. See more in README and config.py.")
config = json.load(open("config.json", 'r'))
if dataset not in config:
raise Exception(
"Dataset {} does not exist, please download to data/ and add the path to this function... Abort".format(
dataset))
return config[dataset]['train_dir'], config[dataset]['test_dir'], config[dataset]['num_classes'], config[dataset][
'num_images']
def normalize(x):
return x / np.linalg.norm(x, axis=1, keepdims=True)
class CloakData(object):
def __init__(self, dataset, img_shape=(224, 224), protect_class=None):
self.dataset = dataset
self.img_shape = img_shape
self.train_data_dir, self.test_data_dir, self.number_classes, self.number_samples = get_dataset_path(dataset)
self.all_labels = sorted(list(os.listdir(self.train_data_dir)))
if protect_class:
self.protect_class = protect_class
else:
self.protect_class = random.choice(self.all_labels)
self.sybil_class = random.choice([l for l in self.all_labels if l != self.protect_class])
self.protect_train_X, self.protect_test_X = self.load_label_data(self.protect_class)
self.sybil_train_X, self.sybil_test_X = self.load_label_data(self.sybil_class)
self.cloaked_protect_train_X = None
self.cloaked_sybil_train_X = None
self.label2path_train, self.label2path_test, self.path2idx = self.build_data_mapping()
self.all_training_path = self.get_all_data_path(self.label2path_train)
self.all_test_path = self.get_all_data_path(self.label2path_test)
self.protect_class_path = self.get_class_image_files(os.path.join(self.train_data_dir, self.protect_class))
self.sybil_class_path = self.get_class_image_files(os.path.join(self.train_data_dir, self.sybil_class))
print("Find {} protect images".format(len(self.protect_class_path)))
def get_class_image_files(self, path):
return [os.path.join(path, f) for f in os.listdir(path)]
def extractor_ls_predict(self, feature_extractors_ls, X):
feature_ls = []
for extractor in feature_extractors_ls:
cur_features = extractor.predict(X)
feature_ls.append(cur_features)
concated_feature_ls = np.concatenate(feature_ls, axis=1)
concated_feature_ls = normalize(concated_feature_ls)
return concated_feature_ls
def load_embeddings(self, feature_extractors_names):
dictionaries = []
for extractor_name in feature_extractors_names:
path2emb = pickle.load(open("../feature_extractors/embeddings/{}_emb_norm.p".format(extractor_name), "rb"))
dictionaries.append(path2emb)
merge_dict = {}
for k in dictionaries[0].keys():
cur_emb = [dic[k] for dic in dictionaries]
merge_dict[k] = np.concatenate(cur_emb)
return merge_dict
def select_target_label(self, feature_extractors_ls, feature_extractors_names, metric='l2'):
original_feature_x = self.extractor_ls_predict(feature_extractors_ls, self.protect_train_X)
path2emb = self.load_embeddings(feature_extractors_names)
items = list(path2emb.items())
paths = [p[0] for p in items]
embs = [p[1] for p in items]
embs = np.array(embs)
pair_dist = pairwise_distances(original_feature_x, embs, metric)
max_sum = np.min(pair_dist, axis=0)
sorted_idx = np.argsort(max_sum)[::-1]
highest_num = 0
paired_target_X = None
final_target_class_path = None
for idx in sorted_idx[:5]:
target_class_path = paths[idx]
cur_target_X = self.load_dir(target_class_path)
cur_target_X = np.concatenate([cur_target_X, cur_target_X, cur_target_X])
cur_tot_sum, cur_paired_target_X = self.calculate_dist_score(self.protect_train_X, cur_target_X,
feature_extractors_ls,
metric=metric)
if cur_tot_sum > highest_num:
highest_num = cur_tot_sum
paired_target_X = cur_paired_target_X
final_target_class_path = target_class_path
np.random.shuffle(paired_target_X)
return final_target_class_path, paired_target_X
def calculate_dist_score(self, a, b, feature_extractors_ls, metric='l2'):
features1 = self.extractor_ls_predict(feature_extractors_ls, a)
features2 = self.extractor_ls_predict(feature_extractors_ls, b)
pair_cos = pairwise_distances(features1, features2, metric)
max_sum = np.min(pair_cos, axis=0)
max_sum_arg = np.argsort(max_sum)[::-1]
max_sum_arg = max_sum_arg[:len(a)]
max_sum = [max_sum[i] for i in max_sum_arg]
paired_target_X = [b[j] for j in max_sum_arg]
paired_target_X = np.array(paired_target_X)
return np.min(max_sum), paired_target_X
def get_all_data_path(self, label2path):
all_paths = []
for k, v in label2path.items():
cur_all_paths = [os.path.join(k, cur_p) for cur_p in v]
all_paths.extend(cur_all_paths)
return all_paths
def load_label_data(self, label):
train_label_path = os.path.join(self.train_data_dir, label)
test_label_path = os.path.join(self.test_data_dir, label)
train_X = self.load_dir(train_label_path)
test_X = self.load_dir(test_label_path)
return train_X, test_X
def load_dir(self, path):
assert os.path.exists(path)
x_ls = []
for file in os.listdir(path):
cur_path = os.path.join(path, file)
im = image.load_img(cur_path, target_size=self.img_shape)
im = image.img_to_array(im)
x_ls.append(im)
raw_x = np.array(x_ls)
return preprocess_input(raw_x)
def build_data_mapping(self):
label2path_train = {}
label2path_test = {}
idx = 0
path2idx = {}
for label_name in self.all_labels:
full_path_train = os.path.join(self.train_data_dir, label_name)
full_path_test = os.path.join(self.test_data_dir, label_name)
label2path_train[full_path_train] = list(os.listdir(full_path_train))
label2path_test[full_path_test] = list(os.listdir(full_path_test))
for img_file in os.listdir(full_path_train):
path2idx[os.path.join(full_path_train, img_file)] = idx
for img_file in os.listdir(full_path_test):
path2idx[os.path.join(full_path_test, img_file)] = idx
idx += 1
return label2path_train, label2path_test, path2idx
def generate_data_post_cloak(self, sybil=False):
assert self.cloaked_protect_train_X is not None
while True:
batch_X = []
batch_Y = []
cur_batch_path = random.sample(self.all_training_path, 32)
for p in cur_batch_path:
cur_y = self.path2idx[p]
if p in self.protect_class_path:
cur_x = random.choice(self.cloaked_protect_train_X)
elif sybil and (p in self.sybil_class):
cur_x = random.choice(self.cloaked_sybil_train_X)
else:
im = image.load_img(p, target_size=self.img_shape)
im = image.img_to_array(im)
cur_x = preprocess_input(im)
batch_X.append(cur_x)
batch_Y.append(cur_y)
batch_X = np.array(batch_X)
batch_Y = to_categorical(np.array(batch_Y), num_classes=self.number_classes)
yield batch_X, batch_Y