import cv2
import gradio as gr
import numpy as np

from human_pose_estimator import PoseEstimator
from pose_estimator import rect

# Load the Lightweight OpenPose model; "cpu" keeps the demo runnable without a GPU.
pose_estimator = PoseEstimator("cpu")
def get_box(image):
    # Helper wrapping rect() from pose_estimator; not used by the Blocks UI
    # below (see the sketch after the script).
    image_box, _ = rect(pose_estimator, image)
    return image_box
def predict(img: np.ndarray):
    # Estimate poses at an inference size of 512, then draw each skeleton and
    # its bounding box onto the input array (the array is modified in place).
    poses, _, _ = pose_estimator.get_poses(img, 512)
    for pose in poses:
        pose.draw(img)
        cv2.rectangle(img,
                      (pose.bbox[0], pose.bbox[1]),
                      (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]),
                      (0, 255, 0))
    return img
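
# A minimal local smoke test (a sketch: assumes examples/01.jpg exists, as the
# Dataset below implies; uncomment to run outside the Space):
# frame = cv2.cvtColor(cv2.imread("examples/01.jpg"), cv2.COLOR_BGR2RGB)
# cv2.imwrite("pose_01.jpg", cv2.cvtColor(predict(frame), cv2.COLOR_RGB2BGR))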
footer = r"""
<center>
<b>
Demo for <a href='https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch'>Lightweight OpenPose</a>
</b>
</center>
"""
with gr.Blocks(title="OpenPose") as app:
    gr.HTML("<center><h1>Human Pose Estimation PyTorch</h1></center>")
    gr.HTML("<center><h3>Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose</h3></center>")
    with gr.Row(equal_height=False):
        with gr.Column():
            input_img = gr.Image(type="numpy", label="Input image")
            # input_img = gr.Video(source="webcam")  # alternative webcam input
            run_btn = gr.Button(variant="primary")
        with gr.Column():
            # predict() returns a NumPy array, so match the output type.
            output_img = gr.Image(type="numpy", label="Output image")
            gr.ClearButton(components=[input_img, output_img], variant="stop")
    run_btn.click(predict, [input_img], [output_img])
    with gr.Row():
        # Example images examples/01.jpg through examples/03.jpg.
        blobs = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
        examples = gr.Dataset(components=[input_img], samples=blobs)
        examples.click(lambda x: x[0], [examples], [input_img])
    with gr.Row():
        gr.HTML(footer)
# Enable queuing before launching: launch(debug=True) blocks, so a queue()
# call placed after it would never run.
app.queue()
app.launch(share=False, debug=True, show_error=True)
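
get_box above is never wired into the Blocks UI. A minimal sketch of exposing it as its own Gradio demo (assuming the same imports and pose_estimator as in the script; box_demo is a hypothetical name):

box_demo = gr.Interface(
    fn=get_box,
    inputs=gr.Image(type="numpy", label="Input image"),
    outputs=gr.Image(label="Image with person box"),
    title="Bounding box only",
)
box_demo.launch()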