I have a small app that accepts an incoming audio stream from the internet, and I'm trying to find the frequency of a tone or continuous beep. While the tone / beep is sounding it is the only thing playing; the rest of the audio is either silence or talking. I'm using the node-pitchfinder npm module to find the tone, and with a sample audio clip I made of a 2,000 Hz tone the app prints the frequency within one or two Hz. When I pull the audio stream online I keep getting results like 17,000 Hz. My guess is that there is some "noise" in the audio signal and that's what the node-pitchfinder module is picking up.
Is there any way I can filter out that noise in real time to get an accurate frequency?
The streaming audio file is: http://relay.broadcastify.com/fq85hty701gnm4z.mp3
Code below:
const fs = require('fs');
const fsa = require('fs-extra');
const Lame = require('lame');
const Speaker = require('speaker');
const Volume = require('pcm-volume');
const Analyser = require('audio-analyser');
const request = require('request');
const Chunker = require('stream-chunker');
const { YIN } = require('node-pitchfinder');

const detectPitch = YIN({ sampleRate: 44100 });

//const BUFSIZE = 64;
const BUFSIZE = 500;

var decoder = new Lame.Decoder();
decoder.on('format', function(format) { onFormat(format); });

var chunker = Chunker(BUFSIZE);
chunker.pipe(decoder);

var options = {
  url: 'http://relay.broadcastify.com/fq85hty701gnm4z.mp3',
  headers: {
    "Upgrade-Insecure-Requests": 1,
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15"
  }
};

var audio_stream = request(options);
//var audio_stream = fs.createReadStream('./2000.mp3');
audio_stream.pipe(chunker);

function onFormat(format) {
  //if (volume == "undefined")
  volume = 1.0;
  vol = new Volume(volume);
  speaker = new Speaker(format);
  analyser = createAnalyser(format);
  analyser.on('data', sample);
  console.log(format);
  vol.pipe(speaker);
  vol.pipe(analyser);
  decoder.pipe(vol);
  vol.setVolume(volume);
}

function createAnalyser(format) {
  return new Analyser({
    fftSize: 8,
    bufferSize: BUFSIZE,
    'pcm-stream': {
      channels: format.channels,
      sampleRate: format.sampleRate,
      bitDepth: format.bitDepth
    }
  });
}

var logFile = 'log.txt';
var logOptions = { flag: 'a' };

function sample() {
  if (analyser) {
    const frequency = detectPitch(analyser._data);
    console.log(frequency);
  }
}
My goal is to find the most dominant audio frequency in a chunk of data so I can figure out the tone.
I found some code that supposedly does this in Python:
def getFreq( pkt ):
    # Use FFT to determine the peak frequency of the last chunk
    thefreq = 0
    if len(pkt) == bufferSize*swidth:
        indata = np.array(wave.struct.unpack("%dh"%(len(pkt)/swidth), pkt))*window

        # filter out everything outside of our bandpass Hz
        bp = np.fft.rfft(indata)
        minFilterBin = (bandPass[0]/(sampleRate/bufferSize)) + 1
        maxFilterBin = (bandPass[1]/(sampleRate/bufferSize)) - 1
        for i in range(len(bp)):
            if i < minFilterBin:
                bp[i] = 0
            if i > maxFilterBin:
                bp[i] = 0

        # Take the fft and square each value
        fftData = abs(bp)**2

        # find the maximum
        which = fftData[1:].argmax() + 1

        # Compute the magnitude of the sample we found
        dB = 10*np.log10(1e-20+abs(bp[which]))
        #avgdB = 10*np.log10(1e-20+abs(bp[which - 10:which + 10].mean()))

        if dB >= minDbLevel:
            # use quadratic interpolation around the max
            if which != len(fftData)-1:
                warnings.simplefilter("error")
                try:
                    y0, y1, y2 = np.log(fftData[which-1:which+2:])
                    x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
                except RuntimeWarning:
                    return(-1)
                # find the frequency and output it
                warnings.simplefilter("always")
                thefreq = (which + x1) * sampleRate/bufferSize
            else:
                thefreq = which * sampleRate/bufferSize
        else:
            thefreq = -1
    return(thefreq)
Original answer:
I cannot provide you with a complete solution, but hopefully I can give you enough advice to solve the problem.
I would recommend that you save a part of the stream you want to analyze to a file and then take a look at the file with a spectrum analyzer (e.g. with Audacity). This allows you to determine if the 17 kHz signal is present in the audio stream.
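For example, a small sketch that dumps a short excerpt of the stream to disk so you can open it in Audacity; it reuses the request module and URL from your code, and the 30-second duration and file name are arbitrary:
// Sketch: record ~30 seconds of the MP3 stream for offline analysis in Audacity.
const fs = require('fs');
const request = require('request');

const out = fs.createWriteStream('./capture.mp3');
const stream = request('http://relay.broadcastify.com/fq85hty701gnm4z.mp3');
stream.pipe(out);

// Stop after roughly 30 seconds; request streams can be aborted.
setTimeout(() => {
  stream.abort();
  out.end();
}, 30000);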
If the 17 kHz signal is present in the audio stream, you can filter the stream with a low-pass filter (e.g. audio-biquad with type lowpass and a cut-off frequency somewhere above 2 kHz).
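For example, a minimal sketch of how such a filter could be inserted into your pipeline; it assumes audio-biquad accepts { type, frequency } constructor options as described in its README, and the 2,500 Hz cut-off is just a placeholder:
// Sketch: low-pass the decoded PCM before it reaches the analyser.
// audio-biquad is assumed to take { type, frequency } options; check its README.
const BiquadFilter = require('audio-biquad');

const lowpass = new BiquadFilter({
  type: 'lowpass',
  frequency: 2500   // cut-off a bit above the 2 kHz tone you are looking for
});

// Inside onFormat(), route the audio through the filter before analysing it:
// decoder.pipe(vol);
// vol.pipe(lowpass);
// lowpass.pipe(analyser);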
If the 17 kHz signal is not present in the audio, then you could try increasing the buffer size BUFSIZE (currently set to 500 in your code). In the example on node-pitchfinder's GitHub page they use a complete audio file for pitch detection. Depending on how the pitch detection algorithm is implemented, the result might be different for larger chunks of audio data (i.e. a few seconds) compared to very short chunks (500 samples is around 11 ms at a sample rate of 44100). Start with a large value for BUFSIZE (e.g. 44100 -> 1 second) and see if it makes a difference.
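For illustration, the change in your code would just be:
//const BUFSIZE = 500;
const BUFSIZE = 44100;   // ~1 second of samples at a 44.1 kHz sample rate
// createAnalyser() passes BUFSIZE as the analyser's bufferSize, so each call to
// detectPitch(analyser._data) should now see about one second of audio.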
Explanation of the Python code: the code uses an FFT (fast Fourier transform) to find out which frequencies are present in the audio signal and then searches for the frequency with the highest value. This usually works well for simple signals like a 2 kHz sine wave. If you want to implement this in JavaScript, you could use dsp.js, which provides an FFT implementation. However, it is quite a challenge to get this right without some knowledge of digital signal processing theory.
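As a rough illustration of that approach (not a drop-in replacement for your streaming code), here is a sketch using dsp.js; it assumes the npm package exposes the FFT constructor described in the dsp.js README and that samples is a Float32Array of 4096 mono samples in the range -1..1:
// Sketch: find the dominant frequency in one block of samples with dsp.js.
// Assumes dsp.js exports FFT(bufferSize, sampleRate) with forward() and .spectrum.
const dsp = require('dsp.js');

const sampleRate = 44100;
const bufferSize = 4096;                 // must be a power of two

function dominantFrequency(samples) {    // samples: Float32Array of length bufferSize
  const fft = new dsp.FFT(bufferSize, sampleRate);
  fft.forward(samples);                  // fills fft.spectrum (bufferSize / 2 magnitudes)

  // Find the bin with the largest magnitude, skipping the DC bin.
  let maxBin = 1;
  for (let i = 2; i < fft.spectrum.length; i++) {
    if (fft.spectrum[i] > fft.spectrum[maxBin]) maxBin = i;
  }

  // Convert the bin index back to a frequency in Hz.
  return maxBin * sampleRate / bufferSize;
}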
As a side note: the YIN algorithm does not use FFT; it is based on autocorrelation.
Update
The following script uses the FFT data of audio-analyser and searches for the maximum frequency. This approach is very basic and only works well for signals where just one frequency is very dominant. The YIN algorithm is much better suited for pitch detection than this example.
const fs = require('fs');
const Lame = require('lame');
const Analyser = require('audio-analyser');
const Chunker = require('stream-chunker');

var analyser;
var fftSize = 4096;

var decoder = new Lame.Decoder();
decoder.on('format', format => {
  analyser = createAnalyser(format);
  decoder.pipe(analyser);
  analyser.on('data', processSamples);
  console.log(format);
});

var chunker = Chunker(fftSize);
var audio_stream = fs.createReadStream('./sine.mp3');
audio_stream.pipe(chunker);
chunker.pipe(decoder);

function createAnalyser(format) {
  return new Analyser({
    fftSize: fftSize,
    frequencyBinCount: fftSize / 2,
    sampleRate: format.sampleRate,
    channels: format.channels,
    bitDepth: format.bitDepth
  });
}

function processSamples() {
  if (analyser) {
    var fftData = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(fftData);
    var maxBin = fftData.indexOf(Math.max(...fftData));
    var thefreq = maxBin * analyser.sampleRate / analyser.fftSize;
    console.log(maxBin + " " + thefreq);
  }
}