-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpGan_fSplice.py
More file actions
316 lines (237 loc) · 11 KB
/
pGan_fSplice.py
File metadata and controls
316 lines (237 loc) · 11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
"""
Splice face images generated by a GAN (specifically Progressive GAN, but not restricted to it) into a face image patch in another image.
The GAN-generated image is referred to herein as the donor, and the image into which the GAN face is spliced as the recipient image.
It is assumed that the donor image has one and only one face, which is typically the case in progressive GAN generated images at the time of writing this code. If this assumption is falsified by modification in the GAN, then it is assumed that this code will become unstable and it is up to the user to modify the code accordingly.
--
MIT License
Copyright (C) 2018 Ashish Gupta
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
author: Ashish Gupta
email: ashishagupta@gmail.com
version: 0.1.0
"""
from __future__ import print_function
# import inbuilt libraries
import os
import sys
import argparse
# import third-party libraries
import cv2
import dlib
import numpy
# path to pretrained model utilized in face detection
# assumes the model file is in the same directory as this code, please specify full path in case this condition is invalid on user's local machine
PREDICTOR_PATH = "pretrained_model.dat"
# pre-defined parameters used by the dlib library towards face detection
SCALE_FACTOR = 1  # integer factor by which images are resized before landmark detection
FEATHER_AMOUNT = 11  # Gaussian kernel size used to feather mask edges (must be odd)
# index groups into dlib's 68-point facial landmark scheme
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
# Points used to line up the face patch in donor and recipient images
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
                RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)
# Points from the donor image to overlay on the recipient image
OVERLAY_POINTS = [
    LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
    NOSE_POINTS + MOUTH_POINTS,
]
# blur to use during color correction (fraction of the inter-pupillary distance)
COLOR_CORRECT_BLUR_FRAC = 0.6
# instantiate objects from dlib library classes for face detection
detector = dlib.get_frontal_face_detector()
# the shape predictor is loaded lazily by loadModel()
predictor = None
class TooManyFaces(Exception):
    """Signals that more than one face was found in an image.

    NOTE(review): declared for callers' use; the code visible in this file
    only prints a warning rather than raising it -- confirm intent.
    """
class NoFaces(Exception):
    """Raised when no face can be detected in an image."""
def loadModel(path=None):
    """Initialise the global dlib shape predictor, exactly once.

    path: optional filesystem path to the predictor model file; falls back
    to PREDICTOR_PATH when omitted. Calls after the first are no-ops.
    """
    global predictor
    if predictor is not None:
        return
    predictor = dlib.shape_predictor(PREDICTOR_PATH if path is None else path)
def get_landmarks(im):
    """Detect the face in *im* and return its landmark coordinates.

    Returns a numpy.matrix of (x, y) landmark points from the dlib shape
    predictor, computed on the first detected face. Prints a warning when
    several faces are detected and raises NoFaces when none are found.
    """
    global predictor
    rects = detector(im, 1)
    if len(rects) > 1:
        # fixed typo in the warning message ("detecable" -> "detectable")
        print('Image seems to have more than one detectable faces present.')
    if len(rects) == 0:
        # carry a message so callers that print the exception see something useful
        raise NoFaces('No face could be detected in the image.')
    return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])
def annotate_landmarks(im, landmarks):
    """Return a copy of *im* with each landmark circled and labelled by index.

    Debugging aid; the input image is left untouched.
    """
    canvas = im.copy()
    for index, point in enumerate(landmarks):
        position = (point[0, 0], point[0, 1])
        cv2.putText(canvas, str(index), position,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=0.4,
                    color=(0, 0, 255))
        cv2.circle(canvas, position, 3, color=(0, 255, 255))
    return canvas
def draw_convex_hull(im, points, color):
    """Fill the convex hull of *points* into *im* (modified in place)."""
    hull = cv2.convexHull(points)
    cv2.fillConvexPoly(im, hull, color=color)
def get_face_mask(im, landmarks):
    """Build a feathered float64 face mask for *im*.

    The mask has the height/width of *im* with three identical channels and
    covers the convex hulls of the OVERLAY_POINTS landmark groups.
    """
    mask = numpy.zeros(im.shape[:2], dtype=numpy.float64)
    for point_group in OVERLAY_POINTS:
        draw_convex_hull(mask, landmarks[point_group], color=1)
    mask = numpy.array([mask] * 3).transpose((1, 2, 0))
    # binarise after a first blur (dilates the region), then blur again to feather the edge
    mask = (cv2.GaussianBlur(mask, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
    return cv2.GaussianBlur(mask, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)
def transformation_from_points(points1, points2):
    """
    Compute an affine map [s * R | T] minimising
        sum_i || s * R * p1_i + T - p2_i ||^2
    (the orthogonal Procrustes problem): centre both point sets, normalise
    each by its standard deviation, then recover the rotation from the SVD
    of the correlation matrix.
    Returns a 3x3 numpy.matrix in homogeneous form.
    """
    p1 = points1.astype(numpy.float64)
    p2 = points2.astype(numpy.float64)
    centroid1 = numpy.mean(p1, axis=0)
    centroid2 = numpy.mean(p2, axis=0)
    p1 = p1 - centroid1
    p2 = p2 - centroid2
    scale1 = numpy.std(p1)
    scale2 = numpy.std(p2)
    p1 = p1 / scale1
    p2 = p2 / scale2
    U, _, Vt = numpy.linalg.svd(p1.T * p2)
    rotation = (U * Vt).T
    scaled_rot = (scale2 / scale1) * rotation
    translation = centroid2.T - scaled_rot * centroid1.T
    return numpy.vstack([numpy.hstack((scaled_rot, translation)),
                         numpy.matrix([0., 0., 1.])])
def read_im_and_landmarks(fname):
    """Load image *fname*, rescale by SCALE_FACTOR and detect its landmarks.

    Returns (image, landmark matrix).
    Raises IOError when the file cannot be read as an image, and NoFaces
    (from get_landmarks) when no face is detected.
    """
    im = cv2.imread(fname, cv2.IMREAD_COLOR)
    if im is None:
        # cv2.imread silently returns None for missing/unreadable files,
        # which previously surfaced later as a cryptic AttributeError on .shape
        raise IOError('Unable to read image file: {}'.format(fname))
    im = cv2.resize(im, (im.shape[1] * SCALE_FACTOR,
                         im.shape[0] * SCALE_FACTOR))
    s = get_landmarks(im)
    return im, s
def warp_im(im, M, dshape):
    """Warp *im* by the affine part of the 3x3 transform *M*.

    Produces a new image of shape *dshape*; uses inverse mapping with a
    transparent border so unmapped pixels keep the destination's zeros.
    """
    warped = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=warped,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return warped
def correct_colors(im1, im2, landmarks1):
    """Shift the colours of *im2* towards those of *im1*.

    Works by scaling *im2* with the ratio of Gaussian blurs of the two
    images, so the spliced donor patch matches the recipient perceptually.
    Colour is arguably the strongest perceptual attribute; geometric
    distortion (cylindrical, spherical, etc.) and image intrinsics are left
    to future work. The blur kernel scales with the inter-pupillary
    distance measured from *landmarks1*.
    """
    eye_distance = numpy.linalg.norm(
        numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
        numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    kernel = int(COLOR_CORRECT_BLUR_FRAC * eye_distance)
    if kernel % 2 == 0:
        kernel += 1  # GaussianBlur requires an odd kernel size
    im1_blur = cv2.GaussianBlur(im1, (kernel, kernel), 0)
    im2_blur = cv2.GaussianBlur(im2, (kernel, kernel), 0)
    # bump near-zero denominators to avoid divide-by-zero in dark regions
    im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)
    return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
            im2_blur.astype(numpy.float64))
def splice_donor_recipient(image1, image2, imageout):
    """Splice the face from *image2* (donor) into *image1* (recipient).

    Reads both images from disk, aligns the donor face onto the recipient
    face, colour-corrects it, blends via a feathered mask, and writes the
    composite to *imageout*.
    """
    recipient_im, recipient_marks = read_im_and_landmarks(image1)
    donor_im, donor_marks = read_im_and_landmarks(image2)
    # similarity transform mapping between the two aligned landmark sets
    M = transformation_from_points(recipient_marks[ALIGN_POINTS],
                                   donor_marks[ALIGN_POINTS])
    donor_mask = get_face_mask(donor_im, donor_marks)
    warped_mask = warp_im(donor_mask, M, recipient_im.shape)
    combined_mask = numpy.max([get_face_mask(recipient_im, recipient_marks),
                               warped_mask], axis=0)
    warped_donor = warp_im(donor_im, M, recipient_im.shape)
    corrected_donor = correct_colors(recipient_im, warped_donor, recipient_marks)
    composite = recipient_im * (1.0 - combined_mask) + corrected_donor * combined_mask
    # save the spliced GAN-MediFor image to file
    cv2.imwrite(imageout, composite)
def process_images():
    """Splice every donor face into every recipient image.

    Command-line options:
      -d/--donor      directory of GAN generated faces (a single file with -f)
      -r/--recipient  directory of recipient images (a single file with -f)
      -o/--output     output directory (an output file path with -f)
      -f/--files      treat the three paths above as files, not directories

    Defaults: './GAN_Faces/', './MediFor_Images/', './GAN_MediFor/'.
    Spliced images are named <donor name>--<recipient name>.png; failures
    for individual pairs are logged to ./log.txt and skipped.
    """
    parser = argparse.ArgumentParser(description="Splice image patch for face from GAN generated donor to detected face in recipient image.")
    parser.add_argument("-d", "--donor", dest="donor", default="./GAN_Faces/", help="path to directory containing GAN generated faces")
    parser.add_argument("-r", "--recipient", dest="recipient", default="./MediFor_Images/", help="path to directory containing images into which faces are spliced")
    parser.add_argument("-o", "--output", dest="output", default="./GAN_MediFor/", help="output directory into which spliced images are saved")
    parser.add_argument("-f", "--files", dest="files", default=False, help="If the input and output are files not directories", action='store_true')
    args = parser.parse_args()
    donor_directory = args.donor
    recipient_directory = args.recipient
    out_directory = args.output
    fi = args.files
    # donor images -- os.listdir() returns bare names, so the existence test
    # must join them with the directory (the original tested the bare name
    # against the CWD, which made directory mode abort spuriously)
    try:
        head_image_paths = os.listdir(donor_directory) if not fi else [donor_directory]
        first = head_image_paths[0] if fi else os.path.join(donor_directory, head_image_paths[0])
        if not os.path.exists(first):
            raise ValueError
    except (OSError, IndexError, ValueError):
        print('Did you create the donor image directory?')
        print('Quiting ...')
        return
    # recipient images
    try:
        recipient_paths = os.listdir(recipient_directory) if not fi else [recipient_directory]
        first = recipient_paths[0] if fi else os.path.join(recipient_directory, recipient_paths[0])
        if not os.path.exists(first):
            raise ValueError
    except (OSError, IndexError, ValueError):
        print('Did you create the recipient image directory?')
        print('Quiting ...')
        return
    # output folder existence
    if not os.path.exists(out_directory) and not fi:
        print('Did you create the output image directory?')
        print('Quiting...')
        return
    # All donor faces are combined with all recipient images. Naming
    # convention: <donor image name>--<recipient image name>.png; files can
    # be renamed later should a hashing scheme be adopted.
    # Context manager guarantees the error log is closed even on a crash.
    with open('./log.txt', 'w') as lf:
        for head_img in head_image_paths:
            head_path = head_img if fi else os.path.join(donor_directory, head_img)
            for recipient_img in recipient_paths:
                recipient_path = recipient_img if fi else os.path.join(recipient_directory, recipient_img)
                out_img = head_img.split('.')[0] + '--' + recipient_img.split('.')[0] + '.png'
                out_path = out_directory if fi else os.path.join(out_directory, out_img)
                try:
                    splice_donor_recipient(recipient_path, head_path, out_path)
                    print('donor: {}, recipient: {}\n output: {}'.format(head_path, recipient_path, out_path))
                except Exception as err:
                    # best-effort batch: report, log, and continue with the next pair
                    print(err)
                    lf.write('Issue with: {}\n'.format(out_img))
if __name__ == '__main__':
    """
    Please read the documentation to set the data in appropriate directories.
    The program will read images from these directories
    """
    # load the pretrained dlib landmark model once, then batch-process images
    loadModel()
    process_images()