Audio Classification
English · Audio · Classification
joka13 committed on
Commit 55cf66a · verified · 1 Parent(s): 2292220

Update app.py

Files changed (1)
  1. app.py +129 -0
app.py CHANGED
@@ -0,0 +1,129 @@
+ import numpy as np
+ import librosa
+ import tensorflow as tf
+ import streamlit as st
+ import sounddevice as sd
+ import wave
+ import os
+
+ # Constants
+ window_length = 0.02  # 20 ms window length
+ hop_length = 0.0025  # 2.5 ms hop length
+ sample_rate = 22050  # standard audio sample rate
+ n_mels = 128  # number of mel filter banks
+ threshold_zcr = 0.1  # ZCR threshold for breath detection (tune as needed)
+ threshold_rmse = 0.1  # RMS-energy threshold for breath detection (tune as needed)
+ max_len = 500  # fixed number of frames for feature extraction
+
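+ # The features stacked below are 13 MFCC rows + 128 log-mel rows + 1 breath
+ # row = 142 rows, each fixed to max_len (500) frames.
+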
+ # Load the TFLite model
+ interpreter = tf.lite.Interpreter(model_path="model_breath_logspec_mfcc_cnn.tflite")
+ interpreter.allocate_tensors()
+
+ # Get input and output tensor details
+ input_details = interpreter.get_input_details()
+ output_details = interpreter.get_output_details()
+
+ # Extract a per-frame breath indicator from ZCR and RMS energy
+ def extract_breath_features(y, sr):
+     frame_length = int(window_length * sr)
+     hop_length_samples = int(hop_length * sr)
+
+     zcr = librosa.feature.zero_crossing_rate(y=y, frame_length=frame_length, hop_length=hop_length_samples)
+     rmse = librosa.feature.rms(y=y, frame_length=frame_length, hop_length=hop_length_samples)
+
+     zcr = zcr.T.flatten()
+     rmse = rmse.T.flatten()
+
+     # A frame counts as breath when both ZCR and RMS energy exceed their thresholds
+     breaths = (zcr > threshold_zcr) & (rmse > threshold_rmse)
+     breath_feature = np.where(breaths, 1, 0)
+
+     return breath_feature
+
+ # Feature extraction: MFCCs, log-mel spectrogram, and breath indicator
+ def extract_features(file_path):
+     try:
+         y, sr = librosa.load(file_path, sr=None)
+         mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
+         logspec = librosa.amplitude_to_db(librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels))
+         breath_feature = extract_breath_features(y, sr)
+
+         # Fix every feature to max_len frames
+         mfcc = librosa.util.fix_length(mfcc, size=max_len, axis=1)
+         logspec = librosa.util.fix_length(logspec, size=max_len, axis=1)
+         breath_feature = librosa.util.fix_length(breath_feature, size=max_len)
+
+         return np.vstack((mfcc, logspec, breath_feature))
+     except Exception as e:
+         st.error(f"Error processing audio: {e}")
+         return None
+
+ # Prepare a single feature matrix as model input
+ def prepare_single_data(features):
+     features = librosa.util.fix_length(features, size=max_len, axis=1)
+     features = features[np.newaxis, ..., np.newaxis]  # add batch and channel dimensions
+     return features.astype(np.float32)  # TFLite input expects FLOAT32
+
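+ # The prepared tensor is shaped (1, 142, max_len, 1); it has to agree with
+ # input_details[0]['shape'], or set_tensor below will raise an error.
+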
+ # Predict the audio class
+ def predict_audio(file_path):
+     features = extract_features(file_path)
+     if features is not None:
+         prepared_features = prepare_single_data(features)
+         interpreter.set_tensor(input_details[0]['index'], prepared_features)
+         interpreter.invoke()
+         prediction = interpreter.get_tensor(output_details[0]['index'])
+         predicted_class = np.argmax(prediction, axis=1)
+         predicted_prob = prediction[0]
+         return predicted_class[0], predicted_prob
+     return None, None
+
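+ # The output is treated as a two-class probability vector:
+ # index 0 = real, index 1 = fake (see the UI section below).
+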
+ # Record audio from the default input device
+ def record_audio(duration=5, samplerate=22050):
+     st.info(f"🎤 Recording for {duration} seconds...")
+     audio_data = sd.rec(int(duration * samplerate), samplerate=samplerate, channels=1, dtype=np.int16)
+     sd.wait()
+     st.success("✅ Recording Complete!")
+     return audio_data, samplerate
+
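+ # Note: sounddevice captures from the machine running the Streamlit process,
+ # so live recording only works when the app is run locally.
+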
+ # Save recorded audio as a .wav file
+ def save_wav(file_path, audio_data, samplerate):
+     with wave.open(file_path, 'wb') as wf:
+         wf.setnchannels(1)
+         wf.setsampwidth(2)  # 2 bytes per sample for int16 audio
+         wf.setframerate(samplerate)
+         wf.writeframes(audio_data.tobytes())
+
+ # Streamlit UI
+ st.title('🎙️ Audio Deepfake Detection')
+ st.write('Upload or record an audio file to classify it as real or fake.')
+
+ # File uploader
+ uploaded_file = st.file_uploader('📂 Upload an audio file', type=['wav', 'mp3'])
+ recorded_file_path = "recorded_audio.wav"
+
+ # Recording controls: the slider must be created outside the button block,
+ # otherwise it only renders after the click and its value never takes effect
+ duration = st.slider("⏳ Set Duration (seconds)", 1, 10, 5)
+ if st.button("🎤 Record Live Audio"):
+     audio_data, samplerate = record_audio(duration)
+     save_wav(recorded_file_path, audio_data, samplerate)
+     st.audio(recorded_file_path, format="audio/wav")
+
+ # Process uploaded or recorded audio
+ if uploaded_file is not None:
+     with open("uploaded_audio.wav", 'wb') as f:
+         f.write(uploaded_file.getbuffer())
+     file_path = "uploaded_audio.wav"
+     st.audio(file_path, format="audio/wav")
+ elif os.path.exists(recorded_file_path):
+     file_path = recorded_file_path
+ else:
+     file_path = None
+
+ # Run prediction
+ if file_path:
+     prediction, probability = predict_audio(file_path)
+     if prediction is not None:
+         st.write(f'**Predicted Class:** {prediction}')
+         st.write(f'**Probability of being Real:** {probability[0]*100:.2f}%')
+         st.write(f'**Probability of being Fake:** {probability[1]*100:.2f}%')
+     else:
+         st.error("❌ Failed to process the audio file.")
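
A minimal sketch for sanity-checking the model's input and output shapes before running the app (assumes the same model_breath_logspec_mfcc_cnn.tflite file is available; the expected 142 feature rows follow from the constants above):

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="model_breath_logspec_mfcc_cnn.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Expect something like (1, 142, 500, 1): 13 MFCC + 128 log-mel + 1 breath row
print("input shape:", input_details[0]['shape'])
print("output shape:", output_details[0]['shape'])

# Dummy forward pass to confirm the interpreter runs end to end
dummy = np.zeros(input_details[0]['shape'], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
print("dummy prediction:", interpreter.get_tensor(output_details[0]['index']))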