Abhishek Gola committed
Commit: 4fce5f6
Parent: 7094f7b

Updated yunet model usage and added .gitattributes
Files changed:
- .gitattributes +26 -0
- .gitignore +9 -0
- demo.py +5 -8
- yunet.py +55 -0
.gitattributes
ADDED
@@ -0,0 +1,26 @@
+
+# Caffe
+*.caffemodel filter=lfs diff=lfs merge=lfs -text
+
+# Tensorflow
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pbtxt filter=lfs diff=lfs merge=lfs -text
+
+# Torch
+*.t7 filter=lfs diff=lfs merge=lfs -text
+*.net filter=lfs diff=lfs merge=lfs -text
+
+# Darknet
+*.weights filter=lfs diff=lfs merge=lfs -text
+
+# ONNX
+*.onnx filter=lfs diff=lfs merge=lfs -text
+
+# NPY
+*.npy filter=lfs diff=lfs merge=lfs -text
+
+# Images
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,9 @@
+*.pyc
+**/__pycache__
+**/__pycache__/**
+
+.vscode
+
+build/
+**/build
+**/build/**
demo.py
CHANGED
@@ -1,19 +1,20 @@
 # This file is part of OpenCV Zoo project.
 # It is subject to the license terms in the LICENSE file found in the same directory.
 
-
-import sys
 import argparse
 
 import numpy as np
 import cv2 as cv
+from huggingface_hub import hf_hub_download
+
+# Download ONNX model from Hugging Face
+model_d = hf_hub_download(repo_id="opencv/face_detection_yunet", filename="face_detection_yunet_2023mar.onnx")
 
 # Check OpenCV version
 opencv_python_version = lambda str_version: tuple(map(int, (str_version.split("."))))
 assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \
     "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python"
 
-sys.path.append('../face_detection_yunet')
 from yunet import YuNet
 
 from ediffiqa import eDifFIQA
@@ -52,8 +53,6 @@ ediffiqa_parser.add_argument('--model_q', '-mq', type=str, default='ediffiqa_tin
                              help="Usage: Set model type, defaults to 'ediffiqa_tiny_jun2024.onnx'.")
 
 yunet_parser = parser.add_argument_group("YuNet", " Parameters of YuNet - For face detection ")
-yunet_parser.add_argument('--model_d', '-md', type=str, default='../face_detection_yunet/face_detection_yunet_2023mar.onnx',
-                          help="Usage: Set model type, defaults to '../face_detection_yunet/face_detection_yunet_2023mar.onnx'.")
 yunet_parser.add_argument('--conf_threshold', type=float, default=0.9,
                           help='Usage: Set the minimum needed confidence for the model to identify a face, defauts to 0.9. Smaller values may result in faster detection, but will limit accuracy. Filter out faces of confidence < conf_threshold.')
 yunet_parser.add_argument('--nms_threshold', type=float, default=0.3,
@@ -104,7 +103,6 @@ def align_image(image, detection_data):
 
 
 if __name__ == '__main__':
-
     backend_id = backend_target_pairs[args.backend_target][0]
     target_id = backend_target_pairs[args.backend_target][1]
 
@@ -120,7 +118,7 @@ if __name__ == '__main__':
 
     # Instantiate YuNet (face detection)
     model_detect = YuNet(
-        modelPath=args.model_d,
+        modelPath=model_d,
        inputSize=[320, 320],
        confThreshold=args.conf_threshold,
        nmsThreshold=args.nms_threshold,
@@ -152,4 +150,3 @@ if __name__ == '__main__':
 
     print(f" Saving visualization to results.jpg. ")
     cv.imwrite('results.jpg', viz_image)
-
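Taken together, the demo.py changes replace the old sys.path-based model lookup (and its --model_d flag) with a download from the Hugging Face Hub. A minimal sketch of the resulting flow, outside the diff; the sample image path and the printout are illustrative, not part of the commit:

# Minimal sketch of the new loading flow (not part of the commit).
# "sample.jpg" is a hypothetical input image.
import cv2 as cv
from huggingface_hub import hf_hub_download
from yunet import YuNet

model_d = hf_hub_download(repo_id="opencv/face_detection_yunet",
                          filename="face_detection_yunet_2023mar.onnx")

# Same constructor arguments the demo passes after this commit.
detector = YuNet(modelPath=model_d,
                 inputSize=[320, 320],
                 confThreshold=0.9,
                 nmsThreshold=0.3,
                 topK=5000)

image = cv.imread("sample.jpg")
detector.setInputSize([image.shape[1], image.shape[0]])  # width, height
faces = detector.infer(image)  # one row per detected face
print(f"Detected {faces.shape[0]} face(s)")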
yunet.py
ADDED
@@ -0,0 +1,55 @@
+# This file is part of OpenCV Zoo project.
+# It is subject to the license terms in the LICENSE file found in the same directory.
+#
+# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+# Third party copyrights are property of their respective owners.
+
+from itertools import product
+
+import numpy as np
+import cv2 as cv
+
+class YuNet:
+    def __init__(self, modelPath, inputSize=[320, 320], confThreshold=0.6, nmsThreshold=0.3, topK=5000, backendId=0, targetId=0):
+        self._modelPath = modelPath
+        self._inputSize = tuple(inputSize) # [w, h]
+        self._confThreshold = confThreshold
+        self._nmsThreshold = nmsThreshold
+        self._topK = topK
+        self._backendId = backendId
+        self._targetId = targetId
+
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
+
+    @property
+    def name(self):
+        return self.__class__.__name__
+
+    def setBackendAndTarget(self, backendId, targetId):
+        self._backendId = backendId
+        self._targetId = targetId
+        self._model = cv.FaceDetectorYN.create(
+            model=self._modelPath,
+            config="",
+            input_size=self._inputSize,
+            score_threshold=self._confThreshold,
+            nms_threshold=self._nmsThreshold,
+            top_k=self._topK,
+            backend_id=self._backendId,
+            target_id=self._targetId)
+
+    def setInputSize(self, input_size):
+        self._model.setInputSize(tuple(input_size))
+
+    def infer(self, image):
+        # Forward
+        faces = self._model.detect(image)
+        return np.empty(shape=(0, 5)) if faces[1] is None else faces[1]
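The wrapper above is a thin layer over cv.FaceDetectorYN; each row it returns follows that API's layout (bounding box, five landmark points, score), and infer() normalizes the no-detection case to an empty array. A hypothetical usage sketch, assuming the ONNX model and a test image are already on disk:

# Hypothetical usage of the YuNet wrapper added in this commit; the
# model filename and "face.jpg" are assumed to exist locally.
import cv2 as cv
from yunet import YuNet

net = YuNet(modelPath="face_detection_yunet_2023mar.onnx")
net.setBackendAndTarget(cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU)

frame = cv.imread("face.jpg")
net.setInputSize([frame.shape[1], frame.shape[0]])
detections = net.infer(frame)  # empty (0, 5) array when nothing is found
for det in detections:
    x, y, w, h = det[:4].astype(int)
    print(f"face at ({x}, {y}), size {w}x{h}, score {det[-1]:.2f}")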