device agnostic code taken out
app.py CHANGED

@@ -10,9 +10,6 @@ from typing import Tuple, Dict
 # Setup class names
 class_names = ["real", "spoof"]
 
-# Setup device-agnostic code
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
 ### 2. Model and transforms preparation ###
 
 # Create EffNetB2 model
@@ -38,7 +35,7 @@ def predict(img):# -> Tuple[Dict, float]:
     start_time = timer()
 
     # Transform the target image and add a batch dimension
-    img = data_transform(img).unsqueeze(0).to(device)
+    img = data_transform(img).unsqueeze(0)
 
     # Put model into evaluation mode and turn on inference mode
     vggface2.eval()
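For context, the predict function these hunks edit looks like the standard Gradio image-classifier loop: transform the image, add a batch dimension, run the model under inference mode, and return label probabilities plus timing. Below is a minimal sketch of how app.py presumably fits together after this commit. The names class_names, data_transform, and vggface2 come from the diff; that create_vggface2_model returns a (model, transforms) pair is inferred from the model.py docstring, and the softmax/return logic is an assumption rather than the Space's actual code:

    from timeit import default_timer as timer
    from typing import Tuple, Dict

    import torch

    from model import create_vggface2_model

    # Assumed: model.py returns the model together with its transforms
    vggface2, data_transform = create_vggface2_model(num_classes=2)

    # Setup class names
    class_names = ["real", "spoof"]

    def predict(img) -> Tuple[Dict, float]:
        """Classifies an image as real vs. spoof and times the prediction."""
        start_time = timer()

        # Transform the target image and add a batch dimension;
        # with the device code gone, the tensor stays on the CPU
        img = data_transform(img).unsqueeze(0)

        # Put model into evaluation mode and turn on inference mode
        vggface2.eval()
        with torch.inference_mode():
            pred_probs = torch.softmax(vggface2(img), dim=1)

        # Map each class name to its predicted probability (assumed format)
        pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i])
                                 for i in range(len(class_names))}

        pred_time = round(timer() - start_time, 5)
        return pred_labels_and_probs, pred_time

Dropping the .to(device) calls is a common fix when a Space runs on CPU-only hardware: the model and its inputs only need to live on the same device, and leaving both on the default CPU device removes one way to hit a device-mismatch error at runtime.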
model.py CHANGED

@@ -19,7 +19,7 @@ def create_vggface2_model(num_classes:int=2,
         transforms (torchvision.transforms): vggface2 image transforms.
     """
     # load the saved model
-    model_pred = InceptionResnetV1(pretrained='vggface2' , classify = True , num_classes = 2).to(device)
+    model_pred = InceptionResnetV1(pretrained='vggface2' , classify = True , num_classes = 2)
     layer_list = list(model_pred.children())[-5:] # all final layers
     model_pred = nn.Sequential(*list(model_pred.children())[:-5])
 
@@ -43,8 +43,6 @@ def create_vggface2_model(num_classes:int=2,
         out_features=2, # same number of output units as our number of classes
         bias=True))
 
-    model_pred = model_pred.to(device)
-
     # Write transform for image
     data_transform = transforms.Compose([
         # Resize the images to 64x64 --> RECOMENDATION FROM TRAINING FROM FACENET --> 160x160