import requests
from PIL import Image
import matplotlib.pyplot as plt
from transformers import CLIPProcessor, CLIPModel

# Load the PubMedCLIP checkpoint and its matching processor
model = CLIPModel.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")

# Fetch a sample image and define the candidate labels
url = "https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcSjP8UWzpGqXKwlC1zPRhcJOXThfI4pXgg2Zhd1B-cstpnEDalY"
image = Image.open(requests.get(url, stream=True).raw)
text = ["Chest X-Ray", "Brain MRI"]

# Preprocess, run the model, and turn the image-text logits into probabilities
inputs = processor(text=text, images=image, return_tensors="pt", padding=True)
probs = model(**inputs).logits_per_image.softmax(dim=1).detach().numpy().flatten()

# Show the image with the predicted label probabilities in the title
plt.subplots()
plt.imshow(image)
plt.title(" ".join(f"{label}: {prob:.4%}" for label, prob in zip(text, probs)))
plt.axis("off")
plt.show()
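If you want the raw embeddings rather than class probabilities (for example, to build an image-retrieval index), the same checkpoint exposes CLIP's projection heads. The following is a minimal sketch using the standard `get_image_features` / `get_text_features` methods of `transformers.CLIPModel`, reusing the `model`, `processor`, `image`, and `text` objects defined above; the scaling by `model.logit_scale.exp()` should reproduce the logits used in the snippet above.

import torch

# Reuses `model`, `processor`, `image`, and `text` from the snippet above.
with torch.no_grad():
    batch = processor(text=text, images=image, return_tensors="pt", padding=True)
    image_embeds = model.get_image_features(pixel_values=batch["pixel_values"])
    text_embeds = model.get_text_features(
        input_ids=batch["input_ids"], attention_mask=batch["attention_mask"]
    )

# Cosine similarity between the L2-normalised image and text embeddings,
# scaled by the learned temperature, as in CLIP's contrastive objective.
image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
logits = model.logit_scale.exp() * image_embeds @ text_embeds.T
print(logits.softmax(dim=-1))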