# Author: Paul Brossier <piem@piem.org>
-import numpy
+import numpy as np
def downsample_blocking(frames, hop_s, dtype='float32'):
# downmixing to one channel
# zero padding to have a multiple of hop_s
if downsampled.shape[0] % hop_s != 0:
- pad_length = hop_s + downsampled.shape[0] / hop_s * hop_s - downsampled.shape[0]
+ pad_length = hop_s + downsampled.shape[0] // hop_s * hop_s - downsampled.shape[0] # integer arithmetic: pad_length is used as an array length
- downsampled = numpy.hstack([downsampled, numpy.zeros(pad_length, dtype = dtype)])
+ downsampled = np.hstack([downsampled, np.zeros(pad_length, dtype = dtype)])
# blocking
- return downsampled.reshape(downsampled.shape[0] / hop_s, hop_s)
+ return downsampled.reshape(downsampled.shape[0] // hop_s, hop_s) # number of blocks must be an integer
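+
+# Rough usage sketch (illustrative values only, assuming the elided downmixing
+# step above reduces a (n_frames, n_channels) array to a single channel): the
+# signal is zero padded up to a multiple of hop_s, then cut into blocks.
+#   >>> frames = np.random.rand(1000, 2).astype('float32')
+#   >>> downsample_blocking(frames, hop_s=256).shape   # 1000 padded to 1024
+#   (4, 256)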
-def computeModulation(serie,wLen,withLog=True):
+
+def computeModulation(serie, wLen, withLog=True):
'''
- Compute the modulation of a parameter centered. Extremums are set to zero.
+ Compute the modulation (windowed variance) of a series, centered on each sample.
+ The first and last wLen/2 values are set to the nearest computed value.
Args :
- serie : list or numpy array containing the serie.
- modul : Modulation of the serie.
'''
-
- modul = numpy.zeros((1,len(serie)))[0];
+ sLen = len(serie)
+ modul = np.zeros((sLen,))
w = int(wLen/2)
- for i in range(w,len(serie)-w):
+ if withLog:
+ machine_epsilon = np.finfo(np.float32).eps
+
+ for i in range(w, sLen-w):
- d = serie[i-w:i+w]
+ d = np.array(serie[i-w:i+w], dtype=float) # float copy: accepts lists and keeps the clamping below from modifying serie
if withLog:
- d = numpy.log(d)
- modul[i] = numpy.var(d)
+ if not (d > 0).all():
+ d[d <= 0] = machine_epsilon # avoid log(0) = -inf and log of negative values
+ d = np.log(d)
- modul[:w] = modul[w]
+ modul[i] = np.var(d)
+ modul[:w] = modul[w]
modul[-w:] = modul[-w-1]
- return modul;
+ return modul
+
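+# Rough usage sketch for computeModulation, assuming a 1-D numpy array of
+# positive values such as an energy envelope: the result has the same length as
+# the input and holds the variance of the (log) values over a ~wLen-sample window.
+#   >>> env = np.abs(np.sin(np.linspace(0, 10, 100))) + 1e-3
+#   >>> computeModulation(env, wLen=16).shape
+#   (100,)
+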
-def segmentFromValues(values,offset=0):
+def segmentFromValues(values, offset=0):
'''
'''
'''
fh = float(sr)/2.0
- mh = 2595*numpy.log10(1+fh/700)
+ mh = 2595*np.log10(1+fh/700)
step = mh/nbFilters;
- mcenter = numpy.arange(step,mh,step)
+ mcenter = np.arange(step,mh,step)
fcenter = 700*(10**(mcenter/2595)-1)
- filterbank = numpy.zeros((fftLen,nbFilters));
+ filterbank = np.zeros((fftLen, nbFilters))
for i,_ in enumerate(fcenter) :
else :
fmax = fcenter[i+1]
- imin = numpy.ceil(fmin/fh*fftLen)
- imax = numpy.ceil(fmax/fh*fftLen)
+ imin = int(np.ceil(fmin/fh*fftLen)) # cast to int: used as slice indices below
+ imax = int(np.ceil(fmax/fh*fftLen))
filterbank[imin:imax,i] = triangle(imax-imin)
- triangle : triangle filter.
'''
- triangle = numpy.zeros((1,length))[0]
- climax= numpy.ceil(length/2)
+ triangle = np.zeros(length) # 1-D float array, same as np.zeros((1,length))[0]
+ climax = int(np.ceil(length/2)) # integer midpoint, usable as a slice index
- triangle[0:climax] = numpy.linspace(0,1,climax)
- triangle[climax:length] = numpy.linspace(1,0,length-climax)
+ triangle[0:climax] = np.linspace(0,1,climax)
+ triangle[climax:length] = np.linspace(1,0,length-climax)
return triangle
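+
+# Sketch of the two building blocks used in the filterbank construction above
+# (illustrative values only): band centres are spaced linearly in mel and mapped
+# back to Hz, and each band is shaped by a triangular window rising from 0 to 1
+# and falling back to 0.
+#   >>> mel = 2595*np.log10(1 + 1000.0/700)      # Hz -> mel
+#   >>> hz = 700*(10**(mel/2595) - 1)            # mel -> Hz, recovers ~1000.0
+#   >>> triangle(4).tolist()
+#   [0.0, 1.0, 1.0, 0.0]
+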
-def entropy(serie,nbins=10,base=numpy.exp(1),approach='unbiased'):
+def entropy(serie, nbins=10, base=np.exp(1), approach='unbiased'):
'''
- Compute entropy of a serie using the histogram method.
+ Compute the entropy of a series using the histogram method.
estimate = 0
sigma = 0
- bins,edges = numpy.histogram(serie,nbins);
+ bins, edges = np.histogram(serie, nbins)
ncell = len(bins)
- norm = (numpy.max(edges)-numpy.min(edges))/len(bins)
+ norm = (np.max(edges)-np.min(edges))/len(bins)
for b in bins :
if b == 0 :
logf = 0
else :
- logf = numpy.log(b)
+ logf = np.log(b)
estimate = estimate - b*logf
sigma = sigma + b * logf**2
- count = numpy.sum(bins)
+ count = np.sum(bins)
estimate=estimate/count;
- sigma=numpy.sqrt( (sigma/count-estimate**2)/float(count-1) );
- estimate=estimate+numpy.log(count)+numpy.log(norm);
+ sigma = np.sqrt((sigma/count - estimate**2)/float(count - 1))
+ estimate = estimate + np.log(count) + np.log(norm)
nbias=-(ncell-1)/(2*count);
if approach =='unbiased' :
else :
return 0
- estimate=estimate/numpy.log(base);
- nbias =nbias /numpy.log(base);
- sigma =sigma /numpy.log(base);
+ estimate = estimate/np.log(base)
+ nbias = nbias/np.log(base)
+ sigma = sigma/np.log(base)
return estimate
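+
+# Rough usage sketch: with the default natural-log base the returned value is a
+# differential-entropy estimate (histogram entropy plus the log of the bin
+# width), so a large uniform sample on [0, 1) should come out close to 0 nats.
+#   >>> x = np.random.rand(10000)
+#   >>> h = entropy(x, nbins=10)                 # approximately 0.0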