-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmp.py
More file actions
113 lines (84 loc) · 3.35 KB
/
mp.py
File metadata and controls
113 lines (84 loc) · 3.35 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
import PIL.Image
import cv2
import mediapipe as mp
# from mediapipe.python.solutions.drawing_utils import *
import tensorflow as tf
from fastapi import FastAPI, File, UploadFile
from PIL import Image, ImageOps
# MediaPipe face-detection solution and its drawing helpers.
mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
# Keras mask classifier, loaded once at import time from the working
# directory (import fails if 'mask_mobile_net.h5' is missing).
model = tf.keras.models.load_model('mask_mobile_net.h5')
# Font used for the on-frame "WITH MASK" / "NO MASK" labels.
FONT = cv2.FONT_HERSHEY_SIMPLEX
# FastAPI application instance; routes are registered on it below.
app = FastAPI()
# Static-image annotation pass (runs at import time, not behind a route).
# IMAGE_FILES: paths of images to annotate; currently empty, so nothing runs.
IMAGE_FILES = []
# Guard avoids constructing (and loading) the MediaPipe detector at import
# time when there are no images to process.
if IMAGE_FILES:
    with mp_face_detection.FaceDetection(
        model_selection=1,
        min_detection_confidence=0.5,
    ) as face_detection:
        for idx, file in enumerate(IMAGE_FILES):
            image = cv2.imread(file)
            # MediaPipe expects RGB; OpenCV loads BGR.
            results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            # Skip images with no detected faces.
            if not results.detections:
                continue
            annotated_image = image.copy()
            for detection in results.detections:
                print('Nose tip:')
                print(mp_face_detection.get_key_point(
                    detection, mp_face_detection.FaceKeyPoint.NOSE_TIP))
                mp_drawing.draw_detection(annotated_image, detection)
            cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
@app.post("/video")
def video():
    """Run the webcam face-mask detection loop.

    Opens the default camera, detects faces with MediaPipe, classifies each
    face crop with the Keras mask model, and shows an annotated preview
    window. Blocks until ESC is pressed or the camera closes; the endpoint
    responds (with null) only after the loop ends.

    Fixes vs. the original:
    - removed the stray early ``return`` that made the whole body dead code;
    - the classifier now receives the resized *face crop* (the original
      resized the full frame, ignoring the crop);
    - bounding boxes are scaled by the real frame size instead of the
      hard-coded 640x480, and clamped so off-frame boxes cannot produce an
      empty crop that crashes ``cv2.resize``;
    - camera/windows are released even if an exception escapes the loop.
    """
    cap = cv2.VideoCapture(0)
    try:
        with mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.5) as face_detection:
            while cap.isOpened():
                success, image = cap.read()
                if not success:
                    print("Ignoring empty camera frame.")
                    # If loading a video, use 'break' instead of 'continue'.
                    continue
                # Mark read-only so MediaPipe can avoid copying the frame.
                image.flags.writeable = False
                results = face_detection.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
                image.flags.writeable = True
                if results.detections:
                    frame_h, frame_w = image.shape[:2]
                    for detection in results.detections:
                        box = detection.location_data.relative_bounding_box
                        # Relative box -> absolute pixels, clamped to the frame.
                        x = max(int(box.xmin * frame_w), 0)
                        y = max(int(box.ymin * frame_h), 0)
                        w = int(box.width * frame_w)
                        h = int(box.height * frame_h)
                        face = image[y:y + h, x:x + w, :]
                        if face.size == 0:
                            # Degenerate box (face partly off-frame): skip it.
                            continue
                        # Resize the crop (not the whole frame) to the model's
                        # 224x224 input, scale to [0, 1], add a batch axis.
                        face = cv2.resize(face, (224, 224))
                        face = tf.keras.preprocessing.image.img_to_array(face)
                        face = face / 255.
                        face = tf.expand_dims(face, axis=0)
                        pred = model.predict(face)
                        pred_res = pred.argmax(axis=1)[0]
                        # Class 0 = mask; anything else = no mask.
                        label = "WITH MASK" if pred_res == 0 else "NO MASK"
                        color = (0, 255, 0) if label == "WITH MASK" else (0, 0, 255)
                        cv2.rectangle(image, pt1=(x, y), pt2=(x + w, y + h), color=color, thickness=2)
                        cv2.putText(image, label, (x, y - 10), FONT, 0.5, color, 2)
                cv2.imshow('Face Mask Detector', image)
                if cv2.waitKey(5) & 0xFF == 27:  # ESC quits the loop
                    break
    finally:
        cap.release()
        cv2.destroyAllWindows()