Skip to content

Commit 919f60c

Browse files
Merge pull request #281 from msyache/yache/quality-score-qs
Add references to face recognition quality attribute
2 parents e426157 + 8835614 commit 919f60c

4 files changed

Lines changed: 103 additions & 20 deletions

File tree

curl/face/detect.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
# <detection_model_3>
2-
curl -H "Ocp-Apim-Subscription-Key: TODO_INSERT_YOUR_FACE_SUBSCRIPTION_KEY_HERE" "TODO_INSERT_YOUR_FACE_ENDPOINT_HERE/face/v1.0/detect?detectionModel=detection_03&returnFaceId=true&returnFaceLandmarks=false" -H "Content-Type: application/json" --data-ascii "{\"url\":\"https://upload.wikimedia.org/wikipedia/commons/c/c3/RH_Louise_Lillian_Gish.jpg\"}"
2+
curl -H "Ocp-Apim-Subscription-Key: TODO_INSERT_YOUR_FACE_SUBSCRIPTION_KEY_HERE" "TODO_INSERT_YOUR_FACE_ENDPOINT_HERE/face/v1.0/detect?detectionModel=detection_03&returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=qualityForRecognition" -H "Content-Type: application/json" --data-ascii "{\"url\":\"https://upload.wikimedia.org/wikipedia/commons/c/c3/RH_Louise_Lillian_Gish.jpg\"}"
33
# </detection_model_3>
44

55
# <detection_model_1>
6-
curl -H "Ocp-Apim-Subscription-Key: TODO_INSERT_YOUR_FACE_SUBSCRIPTION_KEY_HERE" "TODO_INSERT_YOUR_FACE_ENDPOINT_HERE/face/v1.0/detect?detectionModel=detection_01&returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise" -H "Content-Type: application/json" --data-ascii "{\"url\":\"https://upload.wikimedia.org/wikipedia/commons/c/c3/RH_Louise_Lillian_Gish.jpg\"}"
6+
curl -H "Ocp-Apim-Subscription-Key: TODO_INSERT_YOUR_FACE_SUBSCRIPTION_KEY_HERE" "TODO_INSERT_YOUR_FACE_ENDPOINT_HERE/face/v1.0/detect?detectionModel=detection_01&returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise,qualityForRecognition" -H "Content-Type: application/json" --data-ascii "{\"url\":\"https://upload.wikimedia.org/wikipedia/commons/c/c3/RH_Louise_Lillian_Gish.jpg\"}"
77
# </detection_model_1>
88

99
# <detect_for_similar>

dotnet/Face/FaceQuickstart.cs

Lines changed: 40 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,8 @@ public static async Task DetectFaceExtract(IFaceClient client, string url, strin
158158
returnFaceAttributes: new List<FaceAttributeType> { FaceAttributeType.Accessories, FaceAttributeType.Age,
159159
FaceAttributeType.Blur, FaceAttributeType.Emotion, FaceAttributeType.Exposure, FaceAttributeType.FacialHair,
160160
FaceAttributeType.Gender, FaceAttributeType.Glasses, FaceAttributeType.Hair, FaceAttributeType.HeadPose,
161-
FaceAttributeType.Makeup, FaceAttributeType.Noise, FaceAttributeType.Occlusion, FaceAttributeType.Smile },
161+
FaceAttributeType.Makeup, FaceAttributeType.Noise, FaceAttributeType.Occlusion, FaceAttributeType.Smile,
162+
FaceAttributeType.Smile, FaceAttributeType.QualityForRecognition },
162163
// We specify detection model 1 because we are retrieving attributes.
163164
detectionModel: DetectionModel.Detection01,
164165
recognitionModel: recognitionModel);
@@ -230,6 +231,9 @@ public static async Task DetectFaceExtract(IFaceClient client, string url, strin
230231
Console.WriteLine($"Occlusion : {string.Format("EyeOccluded: {0}", face.FaceAttributes.Occlusion.EyeOccluded ? "Yes" : "No")} " +
231232
$" {string.Format("ForeheadOccluded: {0}", face.FaceAttributes.Occlusion.ForeheadOccluded ? "Yes" : "No")} {string.Format("MouthOccluded: {0}", face.FaceAttributes.Occlusion.MouthOccluded ? "Yes" : "No")}");
232233
Console.WriteLine($"Smile : {face.FaceAttributes.Smile}");
234+
235+
// Get quality for recognition attribute
236+
Console.WriteLine($"QualityForRecognition : {face.FaceAttributes.QualityForRecognition}");
233237
Console.WriteLine();
234238
}
235239
}
@@ -238,16 +242,27 @@ public static async Task DetectFaceExtract(IFaceClient client, string url, strin
238242

239243
// Detect faces from image url for recognition purpose. This is a helper method for other functions in this quickstart.
240244
// Parameter `returnFaceId` of `DetectWithUrlAsync` must be set to `true` (by default) for recognition purpose.
245+
// Parameter `FaceAttributes` is set to include the QualityForRecognition attribute.
246+
// Recognition model must be set to recognition_03 or recognition_04 as a result.
247+
// Result faces with insufficient quality for recognition are filtered out.
241248
// The field `faceId` in returned `DetectedFace`s will be used in Face - Find Similar, Face - Verify, and Face - Identify.
242249
// It will expire 24 hours after the detection call.
243250
// <snippet_face_detect_recognize>
244251
private static async Task<List<DetectedFace>> DetectFaceRecognize(IFaceClient faceClient, string url, string recognition_model)
245252
{
246253
// Detect faces from image URL. Since only recognizing, use the recognition model 1.
247254
// We use detection model 3 because we are not retrieving attributes.
248-
IList<DetectedFace> detectedFaces = await faceClient.Face.DetectWithUrlAsync(url, recognitionModel: recognition_model, detectionModel: DetectionModel.Detection03);
249-
Console.WriteLine($"{detectedFaces.Count} face(s) detected from image `{Path.GetFileName(url)}`");
250-
return detectedFaces.ToList();
255+
IList<DetectedFace> detectedFaces = await faceClient.Face.DetectWithUrlAsync(url, recognitionModel: recognition_model, detectionModel: DetectionModel.Detection03, FaceAttributes: new List<FaceAttributeType> { FaceAttributeType.QualityForRecognition });
256+
List<DetectedFace> sufficientQualityFaces = new List<DetectedFace>();
257+
foreach (DetectedFace detectedFace in detectedFaces){
258+
var faceQualityForRecognition = detectedFace.FaceAttributes.QualityForRecognition;
259+
if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value >= QualityForRecognition.Medium)){
260+
sufficientQualityFaces.Add(detectedFace);
261+
}
262+
}
263+
Console.WriteLine($"{detectedFaces.Count} face(s) with {sufficientQualityFaces.Count} having sufficient quality for recognition detected from image `{Path.GetFileName(url)}`");
264+
265+
return sufficientQualityFaces.ToList();
251266
}
252267
// </snippet_face_detect_recognize>
253268
/*
@@ -402,6 +417,27 @@ public static async Task IdentifyInPersonGroup(IFaceClient client, string url, s
402417
// Add face to the person group person.
403418
foreach (var similarImage in personDictionary[groupedFace])
404419
{
420+
Console.WriteLine($"Check whether image is of sufficient quality for recognition");
421+
IList<DetectedFace> detectedFaces = await client.Face.DetectWithUrlAsync($"{url}{similarImage}",
422+
recognitionModel: recognition_model,
423+
detectionModel: DetectionModel.Detection03,
424+
returnFaceAttributes: new List<FaceAttributeType> { FaceAttributeType.QualityForRecognition });
425+
bool sufficientQuality = true;
426+
foreach (var face in detectedFaces)
427+
{
428+
var faceQualityForRecognition = face.FaceAttributes.QualityForRecognition;
429+
// Only "high" quality images are recommended for person enrollment
430+
if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value != QualityForRecognition.High)){
431+
sufficientQuality = false;
432+
break;
433+
}
434+
}
435+
436+
if (!sufficientQuality){
437+
continue;
438+
}
439+
440+
405441
Console.WriteLine($"Add face to the person group person({groupedFace}) from image `{similarImage}`");
406442
PersistedFace face = await client.PersonGroupPerson.AddFaceFromUrlAsync(personGroupId, person.PersonId,
407443
$"{url}{similarImage}", similarImage);

javascript/Face/sdk_quickstart.js

Lines changed: 31 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -42,9 +42,10 @@ async function DetectFaceExtract() {
4242
await Promise.all (image_file_names.map (async function (image_file_name) {
4343
let detected_faces = await client.face.detectWithUrl(image_base_url + image_file_name,
4444
{
45-
returnFaceAttributes: ["Accessories","Age","Blur","Emotion","Exposure","FacialHair","Gender","Glasses","Hair","HeadPose","Makeup","Noise","Occlusion","Smile"],
45+
returnFaceAttributes: ["Accessories","Age","Blur","Emotion","Exposure","FacialHair","Gender","Glasses","Hair","HeadPose","Makeup","Noise","Occlusion","Smile","QualityForRecognition"],
4646
// We specify detection model 1 because we are retrieving attributes.
47-
detectionModel: "detection_01"
47+
detectionModel: "detection_01",
48+
recognitionModel: "recognition_03"
4849
});
4950
console.log (detected_faces.length + " face(s) detected from image " + image_file_name + ".");
5051
console.log("Face attributes for face(s) in " + image_file_name + ":");
@@ -128,6 +129,8 @@ async function DetectFaceExtract() {
128129
console.log(" Mouth occluded: " + (face.faceAttributes.occlusion.mouthOccluded ? "Yes" : "No"));
129130

130131
console.log("Smile: " + face.faceAttributes.smile);
132+
133+
console.log("QualityForRecognition: " + face.faceAttributes.qualityForRecognition)
131134
console.log();
132135
});
133136
}));
@@ -137,13 +140,15 @@ async function DetectFaceExtract() {
137140
// <recognize>
138141
async function DetectFaceRecognize(url) {
139142
// Detect faces from image URL. Since only recognizing, use the recognition model 4.
140-
// We use detection model 3 because we are not retrieving attributes.
143+
// We use detection model 3 because we are only retrieving the qualityForRecognition attribute.
144+
// Result faces with quality for recognition lower than "medium" are filtered out.
141145
let detected_faces = await client.face.detectWithUrl(url,
142146
{
143147
detectionModel: "detection_03",
144-
recognitionModel: "recognition_04"
148+
recognitionModel: "recognition_04",
149+
returnFaceAttributes: ["QualityForRecognition"]
145150
});
146-
return detected_faces;
151+
return detected_faces.filter(face => face.faceAttributes.qualityForRecognition == 'high' || face.faceAttributes.qualityForRecognition == 'medium');
147152
}
148153
// </recognize>
149154

@@ -199,8 +204,25 @@ async function AddFacesToPersonGroup(person_dictionary, person_group_id) {
199204

200205
// Add faces to the person group person.
201206
await Promise.all (value.map (async function (similar_image) {
202-
console.log("Add face to the person group person: (" + key + ") from image: " + similar_image + ".");
203-
await client.personGroupPerson.addFaceFromUrl(person_group_id, person.personId, image_base_url + similar_image);
207+
// Check if the image is of sufficient quality for recognition.
208+
let sufficientQuality = true;
209+
let detected_faces = await client.face.detectWithUrl(image_base_url + similar_image,
210+
{
211+
returnFaceAttributes: ["QualityForRecognition"],
212+
detectionModel: "detection_03",
213+
recognitionModel: "recognition_03"
214+
});
215+
detected_faces.forEach(detected_face => {
216+
if (detected_face.faceAttributes.qualityForRecognition != 'high'){
217+
sufficientQuality = false;
218+
}
219+
});
220+
221+
// Quality is sufficient, add to group.
222+
if (sufficientQuality){
223+
console.log("Add face to the person group person: (" + key + ") from image: " + similar_image + ".");
224+
await client.personGroupPerson.addFaceFromUrl(person_group_id, person.personId, image_base_url + similar_image);
225+
}
204226
}));
205227
}));
206228

@@ -257,10 +279,9 @@ async function IdentifyInPersonGroup() {
257279
await WaitForPersonGroupTraining(person_group_id);
258280
console.log();
259281

260-
// Detect faces from source image url.
282+
// Detect faces from source image url and only take those with sufficient quality for recognition.
261283
let face_ids = (await DetectFaceRecognize(image_base_url + source_image_file_name)).map (face => face.faceId);
262-
263-
// Identify the faces in a person group.
284+
// Identify the faces in a person group.
264285
let results = await client.face.identify(face_ids, { personGroupId : person_group_id});
265286
await Promise.all (results.map (async function (result) {
266287
let person = await client.personGroupPerson.get(person_group_id, result.candidates[0].personId);

python/Face/FaceQuickstart.py

Lines changed: 30 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
from PIL import Image, ImageDraw
1515
from azure.cognitiveservices.vision.face import FaceClient
1616
from msrest.authentication import CognitiveServicesCredentials
17-
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person
17+
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, QualityForRecognition
1818
# </snippet_imports>
1919

2020
'''
@@ -317,16 +317,40 @@ def drawFaceRectangles() :
317317
# Add to a woman person
318318
for image in woman_images:
319319
w = open(image, 'r+b')
320+
# Check if the image is of sufficient quality for recognition.
321+
sufficientQuality = True
322+
detected_faces = face_client.face.detect_with_url(url=single_face_image_url, detection_model='detection_03', recognition_model='recognition_04', return_face_attributes=['qualityForRecognition'])
323+
for face in detected_faces:
324+
if face.face_attributes.quality_for_recognition != QualityForRecognition.high:
325+
sufficientQuality = False
326+
break
327+
if not sufficientQuality: continue
320328
face_client.person_group_person.add_face_from_stream(PERSON_GROUP_ID, woman.person_id, w)
321329

322330
# Add to a man person
323331
for image in man_images:
324332
m = open(image, 'r+b')
333+
# Check if the image is of sufficient quality for recognition.
334+
sufficientQuality = True
335+
detected_faces = face_client.face.detect_with_url(url=single_face_image_url, detection_model='detection_03', recognition_model='recognition_04', return_face_attributes=['qualityForRecognition'])
336+
for face in detected_faces:
337+
if face.face_attributes.quality_for_recognition != QualityForRecognition.high:
338+
sufficientQuality = False
339+
break
340+
if not sufficientQuality: continue
325341
face_client.person_group_person.add_face_from_stream(PERSON_GROUP_ID, man.person_id, m)
326342

327343
# Add to a child person
328344
for image in child_images:
329345
ch = open(image, 'r+b')
346+
# Check if the image is of sufficient quality for recognition.
347+
sufficientQuality = True
348+
detected_faces = face_client.face.detect_with_url(url=single_face_image_url, detection_model='detection_03', recognition_model='recognition_04', return_face_attributes=['qualityForRecognition'])
349+
for face in detected_faces:
350+
if face.face_attributes.quality_for_recognition != QualityForRecognition.high:
351+
sufficientQuality = False
352+
break
353+
if not sufficientQuality: continue
330354
face_client.person_group_person.add_face_from_stream(PERSON_GROUP_ID, child.person_id, ch)
331355
# </snippet_persongroup_assign>
332356

@@ -364,10 +388,12 @@ def drawFaceRectangles() :
364388

365389
# Detect faces
366390
face_ids = []
367-
# We use detection model 3 to get better performance.
368-
faces = face_client.face.detect_with_stream(image, detection_model='detection_03')
391+
# We use detection model 3 to get better performance, recognition model 4 to support quality for recognition attribute.
392+
faces = face_client.face.detect_with_stream(image, detection_model='detection_03', recognition_model='recognition_04', return_face_attributes=['qualityForRecognition'])
369393
for face in faces:
370-
face_ids.append(face.face_id)
394+
# Only take the face if it is of sufficient quality.
395+
if face.face_attributes.quality_for_recognition == QualityForRecognition.high or face.face_attributes.quality_for_recognition == QualityForRecognition.medium:
396+
face_ids.append(face.face_id)
371397
# </snippet_identify_testimage>
372398

373399
# <snippet_identify>

0 commit comments

Comments
 (0)