-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path inference.py
executable file
·100 lines (84 loc) · 3.41 KB
/
inference.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
#!/usr/bin/env python3
"""Inference utilities for a Keras audio anti-spoofing model.

Loads a saved model (custom DftSpectrogram layer + focal loss) and
provides feature extraction from a file path, a URL, or raw bytes,
plus a small timing/prediction harness.
"""
import os
import sys
import tensorflow as tf
from tensorflow import keras
from keras.models import load_model
from keras import backend as K
import librosa
import soundfile as sf
import numpy as np
import random
import time
import io
from six.moves.urllib.request import urlopen

# Make the project-local modules (custom layer and loss) importable
# before the model is deserialized.
sys.path.append(os.path.abspath("data/src"))
from DftSpectrogram import DftSpectrogram
from focal_loss import focal_loss

# Drop any leftover TF graph/session state from a previous run.
K.clear_session()
def get_feature(wav_path, length=102800, random_start=False):
    """Load a wav file and return a fixed-length, peak-normalized waveform.

    Parameters
    ----------
    wav_path : str
        Path to an audio file readable by librosa.
    length : int
        Number of samples in the returned feature (102800 ~= 6.4 s at 16 kHz).
    random_start : bool
        If True, crop starting at a random offset instead of sample 0.

    Returns
    -------
    np.ndarray of shape (length,) scaled to peak amplitude 1.0,
    or None if loading/processing fails (errors are printed, not raised).
    """
    try:
        # librosa resamples to 16 kHz mono on load.
        x, sr = librosa.load(wav_path, sr=16000)
        assert sr == 16000
        if len(x) == 0:
            raise ValueError("empty audio")
        # Tile short clips until they cover at least `length` samples.
        if length > len(x):
            x = np.concatenate([x] * int(np.ceil(length / len(x))))
        if random_start:
            x = x[random.randint(0, len(x) - length):]
        feature = x[:length]
        peak = np.max(np.abs(feature))
        if peak == 0:
            # Silent audio: dividing by zero would return a NaN/inf array
            # instead of the documented None failure value.
            raise ValueError("silent audio, cannot peak-normalize")
        return feature / peak
    except Exception as e:
        print("Error with getting feature from %s: %s" % (wav_path, str(e)))
        return None
def get_feature_from_url(wav_path, length=102800, random_start=False):
    """Download audio from a URL and return a fixed-length, peak-normalized waveform.

    Parameters
    ----------
    wav_path : str
        URL of an audio file; the response body is decoded with soundfile.
    length : int
        Number of samples in the returned feature (102800 ~= 6.4 s at 16 kHz).
    random_start : bool
        If True, crop starting at a random offset instead of sample 0.

    Returns
    -------
    np.ndarray of the first `length` frames scaled to peak amplitude 1.0,
    or None if downloading/decoding fails (errors are printed, not raised).
    """
    try:
        # x, sr = librosa.load(wav_path, sr=16000)
        # NOTE(review): soundfile does not resample; `samplerate` is only
        # accepted for RAW input, so non-16kHz or non-RAW files may fail
        # here or trip the assert below — confirm against real inputs.
        # NOTE(review): sf.read returns 2-D data for multi-channel audio,
        # unlike librosa's mono load — verify the model input shape.
        x, sr = sf.read(io.BytesIO(urlopen(wav_path).read()), samplerate=16000)
        assert sr == 16000
        if len(x) == 0:
            raise ValueError("empty audio")
        # Tile short clips until they cover at least `length` samples.
        if length > len(x):
            x = np.concatenate([x] * int(np.ceil(length / len(x))))
        if random_start:
            x = x[random.randint(0, len(x) - length):]
        feature = x[:length]
        peak = np.max(np.abs(feature))
        if peak == 0:
            # Silent audio: dividing by zero would return a NaN/inf array
            # instead of the documented None failure value.
            raise ValueError("silent audio, cannot peak-normalize")
        return feature / peak
    except Exception as e:
        print("Error with getting feature from %s: %s" % (wav_path, str(e)))
        return None
def get_feature_from_bytes(bytes_string, length=102800, random_start=False):
    """Decode in-memory audio bytes and return a fixed-length, peak-normalized waveform.

    Parameters
    ----------
    bytes_string : bytes
        Encoded audio data readable by librosa (via an in-memory buffer).
    length : int
        Number of samples in the returned feature (102800 ~= 6.4 s at 16 kHz).
    random_start : bool
        If True, crop starting at a random offset instead of sample 0.

    Returns
    -------
    np.ndarray of shape (length,) scaled to peak amplitude 1.0,
    or None if decoding/processing fails (errors are printed, not raised).
    """
    try:
        # librosa resamples to 16 kHz mono on load.
        x, sr = librosa.core.load(io.BytesIO(bytes_string), sr=16000)
        # x, sr = sf.read(bytes_string, samplerate=16000)
        assert sr == 16000
        if len(x) == 0:
            raise ValueError("empty audio")
        # Tile short clips until they cover at least `length` samples.
        if length > len(x):
            x = np.concatenate([x] * int(np.ceil(length / len(x))))
        if random_start:
            x = x[random.randint(0, len(x) - length):]
        feature = x[:length]
        peak = np.max(np.abs(feature))
        if peak == 0:
            # Silent audio: dividing by zero would return a NaN/inf array
            # instead of the documented None failure value.
            raise ValueError("silent audio, cannot peak-normalize")
        return feature / peak
    except Exception as e:
        print("Error with getting feature from bytes string: %s" % (str(e)))
        return None
def test_pred(model, filepath, url=False, from_bytes=False):
    """Extract a feature from one input and return the model's raw prediction.

    Parameters
    ----------
    model : keras model
        Model whose `predict` accepts a (1, length, 1) array.
    filepath : str or bytes
        File path, URL (if `url`), or encoded bytes (if `from_bytes`).
    url, from_bytes : bool
        Select the extraction backend; `url` takes precedence over `from_bytes`.

    Returns
    -------
    The model's prediction for the single-item batch.

    Raises
    ------
    ValueError
        If feature extraction failed (the extractors return None).
    """
    if url:
        feature = get_feature_from_url(filepath, length=102800, random_start=True)
    elif from_bytes:
        feature = get_feature_from_bytes(filepath, length=102800, random_start=True)
    else:
        feature = get_feature(filepath, length=102800, random_start=True)
    if feature is None:
        # Without this guard, indexing None below raised an opaque TypeError.
        raise ValueError("Could not extract feature from input: %r" % (filepath,))
    # Add batch and channel dimensions: (length,) -> (1, length, 1).
    transfeat = feature[np.newaxis, ..., np.newaxis]
    return model.predict(transfeat)
def run_tests(items=None):
    """Print a timed human/attack prediction for each file path in `items`.

    Parameters
    ----------
    items : list of str, optional
        Audio file paths; defaults to ['data/test/test_2s.wav'].

    Notes
    -----
    Reads the module-global `model` (defined in the __main__ block);
    call only after the model has been loaded.
    """
    # None sentinel instead of a mutable default argument.
    if items is None:
        items = ['data/test/test_2s.wav']
    for item in items:
        print("Making prediction on file {name}".format(name=item))
        start = time.time()
        output = test_pred(model, item)
        end = time.time()
        print('Human: {o[0][0]:.3f}\nAttack: {o[0][1]:.3f}\n'.format(o=output))  # [[human_score, spoof_score]]
        print("Time elapsed {spent:.4f}\n\n".format(spent=end - start))
if __name__ == "__main__":
    # NOTE(review): machine-specific absolute path — consider making it
    # configurable (env var / CLI argument).
    modelpath = '/home/anton/contests/boosters/deploy/web/data/model/model.h5'
    # The saved model uses a custom layer and loss, so load_model needs
    # them registered under the names stored in the .h5 file.
    custom_objects = {'DftSpectrogram': DftSpectrogram, 'focal_loss_fixed': focal_loss()}
    # `model` is a module global read by run_tests/test_pred.
    model = load_model(modelpath, custom_objects=custom_objects)
    model.summary()
    items = ['data/test/test_2s.wav', 'data/test/test_25s.wav', 'data/test/test_4s.wav', 'data/test/attack_19s.wav']
    run_tests(items)