import os

os.system("pip freeze")  # log installed packages (handy for debugging on Spaces)
from huggingface_hub import hf_hub_download

# facenet_pytorch provides the MTCNN face detector used below
os.system("pip -qq install facenet_pytorch")
from facenet_pytorch import MTCNN
from torchvision import transforms
import torch, PIL
from tqdm.notebook import tqdm

import gradio as gr

# download the traced ArcaneGAN generators from the Hugging Face Hub
modelarcanev4 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.4", filename="ArcaneGANv0.4.jit")
modelarcanev3 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.3", filename="ArcaneGANv0.3.jit")
modelarcanev2 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.2", filename="ArcaneGANv0.2.jit")

mtcnn = MTCNN(image_size=256, margin=80)

# simplest ye olde trustworthy MTCNN for face detection with landmarks
def detect(img):
    # Detect faces
    batch_boxes, batch_probs, batch_points = mtcnn.detect(img, landmarks=True)
    # Select faces
    if not mtcnn.keep_all:
        batch_boxes, batch_probs, batch_points = mtcnn.select_boxes(
            batch_boxes, batch_probs, batch_points, img, method=mtcnn.selection_method
        )
    return batch_boxes, batch_points
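# Note on the return values (based on facenet_pytorch's MTCNN.detect): boxes are
# [x0, y0, x1, y1] pixel coordinates per detected face, points are five facial
# landmarks per face, and both are None when no face is found.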
# my version of isOdd, should make a separate repo for it :D
def makeEven(_x):
    return int(_x) if (_x % 2 == 0) else int(_x) + 1

# the actual scaler function
def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False):
    x, y = _img.size
    ratio = 2  # initial ratio

    # scale to the desired face size
    if (boxes is not None):
        if len(boxes) > 0:
            ratio = target_face / max(boxes[0][2:] - boxes[0][:2])
            ratio = min(ratio, max_upscale)
            if VERBOSE: print('up by', ratio)

    if fixed_ratio > 0:
        if VERBOSE: print('fixed ratio')
        ratio = fixed_ratio

    x *= ratio
    y *= ratio

    # downscale to fit into max_res
    res = x * y
    if res > max_res:
        ratio = pow(res / max_res, 1 / 2)
        if VERBOSE: print(ratio)
        x = int(x / ratio)
        y = int(y / ratio)

    # make dimensions even, because NNs often fail on odd dimensions due to skip-connection size mismatches
    x = makeEven(int(x))
    y = makeEven(int(y))

    size = (x, y)
    return _img.resize(size)
| """ | |
| A useful scaler algorithm, based on face detection. | |
| Takes PIL.Image, returns a uniformly scaled PIL.Image | |
| boxes: a list of detected bboxes | |
| _img: PIL.Image | |
| max_res: maximum pixel area to fit into. Use to stay below the VRAM limits of your GPU. | |
| target_face: desired face size. Upscale or downscale the whole image to fit the detected face into that dimension. | |
| fixed_ratio: fixed scale. Ignores the face size, but doesn't ignore the max_res limit. | |
| max_upscale: maximum upscale ratio. Prevents from scaling images with tiny faces to a blurry mess. | |
| """ | |
def scale_by_face_size(_img, max_res=1_500_000, target_face=256, fix_ratio=0, max_upscale=2, VERBOSE=False):
    boxes, _ = detect(_img)
    if VERBOSE: print('boxes', boxes)
    img_resized = scale(boxes, _img, max_res, target_face, fix_ratio, max_upscale, VERBOSE)
    return img_resized
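# A minimal usage sketch for the scaler (assumption: "portrait.jpg" is a local
# photo containing a face; the filename is only illustrative). The image is
# rescaled so the detected face spans roughly target_face pixels while the total
# area stays under max_res:
#
#   example = PIL.Image.open("portrait.jpg").convert("RGB")
#   example_scaled = scale_by_face_size(example, target_face=256, max_res=1_500_000, max_upscale=2, VERBOSE=True)
#   print(example.size, '->', example_scaled.size)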
size = 256

# ImageNet normalization statistics, used for pre- and post-processing
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]

t_stds = torch.tensor(stds).cpu().half().float()[:, None, None]
t_means = torch.tensor(means).cpu().half().float()[:, None, None]

img_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(means, stds)])
def tensor2im(var):
    # de-normalize and convert a CHW tensor back to an HWC image in the 0-255 range
    return var.mul(t_stds).add(t_means).mul(255.).clamp(0, 255).permute(1, 2, 0)

def proc_pil_img(input_image, model):
    transformed_image = img_transforms(input_image)[None, ...].cpu().half().float()

    with torch.no_grad():
        result_image = model(transformed_image)[0]
        output_image = tensor2im(result_image)
        output_image = output_image.detach().cpu().numpy().astype('uint8')
        output_image = PIL.Image.fromarray(output_image)
    return output_image

# load the traced (TorchScript) generators on CPU
modelv4 = torch.jit.load(modelarcanev4, map_location='cpu').eval().cpu().half().float()
modelv3 = torch.jit.load(modelarcanev3, map_location='cpu').eval().cpu().half().float()
modelv2 = torch.jit.load(modelarcanev2, map_location='cpu').eval().cpu().half().float()
def version4(im):
    im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
    res = proc_pil_img(im, modelv4)
    return res

def version3(im):
    im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
    res = proc_pil_img(im, modelv3)
    return res

def version2(im):
    im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
    res = proc_pil_img(im, modelv2)
    return res
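# A quick way to sanity-check the full pipeline without the Gradio UI (assumption:
# "me.jpg" is a local portrait photo; the filename is only illustrative):
#
#   test_im = PIL.Image.open("me.jpg").convert("RGB")
#   version4(test_im).save("me_arcane_v4.png")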
block = gr.Blocks()

with block:
    gr.Markdown("Gradio demo for ArcaneGAN: portrait to Arcane style. To use it, simply upload your image and try out the different versions by clicking on the tabs. For best results, use a cropped portrait picture.")
    with gr.Tab("version four"):
        with gr.Row():
            facepaint4 = gr.Image(type="pil")
            faceout4 = gr.Image(type="pil")
        face_run4 = gr.Button("Run")
        face_run4.click(version4, inputs=facepaint4, outputs=faceout4)
    with gr.Tab("version three"):
        with gr.Row():
            facepaint3 = gr.Image(type="pil")
            faceout3 = gr.Image(type="pil")
        face_run3 = gr.Button("Run")
        face_run3.click(version3, inputs=facepaint3, outputs=faceout3)
    with gr.Tab("version two"):
        with gr.Row():
            facepaint2 = gr.Image(type="pil")
            faceout2 = gr.Image(type="pil")
        face_run2 = gr.Button("Run")
        face_run2.click(version2, inputs=facepaint2, outputs=faceout2)

block.queue().launch()
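# To try the demo locally (assumption: this file is saved as app.py and the
# dependencies above are installed):
#
#   python app.py
#
# then open the local URL that Gradio prints.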