Examples¶
Run simulator on numpy array¶
offline_numpy.py
Run simulator on wav file¶
import hearinglosssimulator as hls

in_filename = 'in_sound.wav'
out_filename = 'out_sound.wav'

# This parameter is important: it maps digital full scale to acoustic level.
calibration = 93  # dB SPL for 0 dBFS

# Hearing-loss description, one entry per ear: per-frequency compression
# degree and passive loss (all zeros = no loss, transparent simulation).
audiogram_freqs = [125., 250., 500., 1000., 2000., 4000., 8000.]
loss_params = {
    ear: {
        'freqs': list(audiogram_freqs),
        'compression_degree': [0.] * 7,
        'passive_loss_db': [0.] * 7,
    }
    for ear in ('left', 'right')
}

params = dict(
    nb_freq_band=32, low_freq=100., high_freq=15000.,
    tau_level=0.005, level_step=1., level_max=100.,
    calibration=calibration,
    loss_params=loss_params,
    chunksize=512, backward_chunksize=1024,
)

# OpenCL device selection; set either to None to select manually at runtime.
gpu_platform_index = 0
gpu_device_index = 0

hls.compute_wave_file(
    in_filename, out_filename,
    processing_class=hls.InvComp,
    duration_limit=10.,
    gpu_platform_index=gpu_platform_index,
    gpu_device_index=gpu_device_index,
    **params)
Plot dynamic filters¶
"""
This illustrate the main concept of dynamic filters depending on levels.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import hearinglosssimulator as hls
nb_channel=1
sample_rate = 44100.
level_max = 100.
level_step = 10.
freqs = [300., 1000., 4000., ]
# No compression loss
#compression_degree = [1]*len(freqs)
# Full compression loss
compression_degree = [0]*len(freqs)
coefficients_pgc, coefficients_hpaf, levels, band_overlap_gain = hls.make_cgc_filter(freqs, compression_degree, level_max, level_step, sample_rate)
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
fig3, ax3 = plt.subplots()
levels_colors = [ get_cmap('jet', len(levels))(l) for l, level in enumerate(levels) ]
freqs_colors = [ get_cmap('jet', len(freqs))(f) for f, freq in enumerate(freqs) ]
for f, freq in enumerate(freqs):
gains = np.zeros(len(levels))
for l, level in enumerate(levels):
all_filter = np.concatenate([coefficients_pgc[f,:,:],coefficients_hpaf[f,l,:,:], coefficients_pgc[f,:,:]], axis = 0)
w, h = hls.sosfreqz(all_filter, worN = 2**16,)
gains[l] = np.max(20*np.log10(np.abs(h)))
hls.plot_filter(all_filter, ax2, sample_rate, color=levels_colors[l])
hls.plot_filter(coefficients_hpaf[f,l,:,:], ax3, sample_rate, color=levels_colors[l])
hls.plot_filter(coefficients_pgc[f,:,:], ax3, sample_rate, color='k', lw=2)
ax3.axvline(freq, color='k')
ax2.axvline(freq, color='k')
ax1.plot(levels, levels+gains, label='{:0.1f}'.format(freq), color=freqs_colors[f])
ax1.plot(levels,levels, color='r', ls='--')
ax1.legend()
ax1.set_xlabel('input level (dB SPL)')
ax1.set_ylabel('output level (dB SPL)')
for ax in [ax2, ax3]:
ax.set_xlabel('freq (Hz)')
ax.set_ylabel('filter gain (dB)')
ax.set_xlim(0., 5000.)
ax.set_ylim(-70,20)
#~ fig1.savefig('input_output_gain.png')
#~ fig3.savefig('filter_pgc_and_hpaf.png')
#~ fig2.savefig('filter_cgc.png')
plt.show()
Run simulator online with sounddevice¶
"""
This example illustrate how to use the simulator in near real time with python-sounddevice.
"""
import sounddevice as sd
import time
import hearinglosssimulator as hls
nb_channel = 2
sample_rate = 44100.
#~ chunksize = 512
chunksize = 1024
backward_chunksize = chunksize*3
loss_params = { 'left' : {'freqs' : [125., 250., 500., 1000., 2000., 4000., 8000.],
'compression_degree': [0., 0., 0., 0., 0., 0., 0.],
'passive_loss_db' : [0., 0., 0., 0., 0., 0., 0.],
},
'right' : {'freqs' : [125., 250., 500., 1000., 2000., 4000., 8000.],
'compression_degree': [0., 0., 0., 0., 0., 0., 0.],
'passive_loss_db' : [0., 0., 0., 0., 0., 0., 0.],
}
}
params = dict(
nb_freq_band=32, low_freq = 100., high_freq = 15000.,
tau_level = 0.005,level_step =1., level_max=100.,
calibration = 110.,
loss_params = loss_params,
chunksize=chunksize, backward_chunksize=backward_chunksize,
)
processing = hls.InvComp(nb_channel=nb_channel, sample_rate=sample_rate,
dtype='float32', apply_configuration_at_init=False, **params)
processing.initialize()
# define the callback audio wire
index = 0
def callback(indata, outdata, frames, time, status):
    """Audio wire: feed each input chunk through the simulator.

    The signature is imposed by sounddevice.Stream; ``time`` here is the
    stream's time info, not the stdlib module.
    """
    global index
    if status:
        print(status, flush=True)
    index += frames
    # NOTE(review): 'proccesing_func' spelling follows the hls API as
    # called here — confirm against the installed package.
    out_index, out_buffer = processing.proccesing_func(index, indata)['main_output']
    if out_index is None:
        # Output not available yet (processing latency): emit silence.
        outdata[:] = 0
    else:
        outdata[:] = out_buffer
# Duplex stream driving the callback; run it for 10 seconds.
stream = sd.Stream(
    channels=nb_channel,
    callback=callback,
    samplerate=sample_rate,
    blocksize=chunksize,
    latency='low',
    device=None,
    dtype='float32',
)
stream.start()
time.sleep(10)
stream.stop()
Run simulator online with pyacq¶
"""
This illustrate how to use the InvCGC as a pyacq Node.
"""
import hearinglosssimulator as hls
import pyacq
import time
import pyaudio
#This pyaudio deveice_index
#~ pa = pyaudio.PyAudio()
#~ for i in range(pa.get_device_count()):
#~ dev = pa.get_device_info_by_index(i)
#~ hostapi_name = pa.get_host_api_info_by_index(dev['hostApi'])['name']
#~ print(dev, )
#~ print(hostapi_name)
#~ exit()
nb_channel = 1
sample_rate = 44100.
chunksize = 512
backward_chunksize = chunksize * 3
loss_params = { 'left' : {'freqs' : [125., 250., 500., 1000., 2000., 4000., 8000.],
'compression_degree': [0., 0., 0., 0., 0., 0., 0.],
'passive_loss_db' : [0., 0., 0., 0., 0., 0., 0.],
},
}
params = dict(
nb_freq_band=16, low_freq = 100., high_freq = 15000.,
tau_level = 0.005, smooth_time = 0.0005, level_step =1., level_max = 120.,
calibration = 93.979400086720375,
loss_params = loss_params,
chunksize=chunksize, backward_chunksize=backward_chunksize,
debug_mode=False,
)
stream_spec = dict(protocol='tcp', interface='127.0.0.1', transfertmode='plaindata')
man = pyacq.create_manager()
ng0 = man.create_nodegroup() # process for device
ng1 = man.create_nodegroup() # process for processing
dev = ng0.create_node('PyAudio')
dev.configure(nb_channel=nb_channel, sample_rate=sample_rate,
input_device_index=10,
output_device_index=10,
format='float32', chunksize=chunksize)
dev.output.configure(**stream_spec)
dev.initialize()
ng1.register_node_type_from_module('hearinglosssimulator', 'InvCGCNode')
node = ng1.create_node('HLSNode')
node.configure(**params)
#~ ng1.register_node_type_from_module('hearinglosssimulator', 'DoNothing')
#~ node = ng1.create_node('DoNothing')
#~ node.configure()
node.input.connect(dev.output)
node.outputs['signals'].configure(**stream_spec)
node.initialize()
dev.input.connect(node.outputs['signals'])
dev.start()
node.start()
time.sleep(15)
dev.stop()
node.stop()
man.close()