Android AudioRecord: Frequenzfilter Schritt für Schritt

Android: Ich bin neu in der Sprachverarbeitung. Ich habe ein AudioRecord-Objekt mit einer Sample-Rate von 8000 Hz erstellt und versuche, mit der JTransforms-Bibliothek Frequenzen zu filtern. Im folgenden Code gibt es einige Dinge, die ich nicht verstehe; meine Fragen lauten wie folgt:

F. 1: Warum konvertieren wir den Puffer mit "toTransform[i] = (double) buffer[i] /32768.0; //signed 16 bit" — also durch Division durch 32768 (signed 16 bit)?

F. 2: Richtig, derzeit liest AudioRecord die Daten in ein short-Array. Wenn ich stattdessen ein byte-Array lese, wie konvertiere ich es in signed 16-Bit?

F. 3: Ich möchte die Tonfrequenz in Hz anzeigen. Wie kann ich aus dem double-Array die Frequenz berechnen?

F. 4: Ich habe die Filtermethode filterAudio() geschrieben, aber sie filtert den Frequenzbereich nicht. Warum?

Bitte helfen Sie mir — ich habe viele offene Fragen.

/* Code wie folgt */

// Candidate sample rates to probe, lowest first (consumed by findAudioRecord()).
private final int[] mSampleRates = new int[] { 8000, 11025, 22050, 44100 };
// NOTE(review): fragment from a background worker (the enclosing method is not
// shown here); blockSize, started, transformer and publishProgress() are
// declared elsewhere — presumably an AsyncTask.doInBackground; confirm.
final AudioRecord audioRecord = findAudioRecord();
                if(audioRecord == null){
                    return null;
                }

                // Raw 16-bit PCM samples from the recorder, and the normalized
                // double copy that is handed to the FFT.
                final short[] buffer = new short[blockSize];
                final double[] toTransform = new double[blockSize];

                audioRecord.startRecording();

                while (started) {
                    // NOTE(review): assumes the enclosing method declares or
                    // handles InterruptedException for this sleep.
                    Thread.sleep(100);
                    final int bufferReadResult = audioRecord.read(buffer, 0, blockSize);

                    for (int i = 0; i < blockSize && i < bufferReadResult; i++) {
                        // Normalize signed 16-bit PCM (-32768..32767) to the
                        // range [-1.0, 1.0) for the FFT — this answers Q1:
                        // the samples ARE already 16-bit; we scale them down.
                        toTransform[i] = (double) buffer[i] / 32768.0; //signed 16 bit
                    }

                    //Audio Filter passing frequency of mSampleRates[3]
                    // NOTE(review): passes mSampleRates[3] (44100 Hz) regardless of
                    // the rate findAudioRecord() actually selected. If recording at
                    // 8000 Hz, the hard-coded 14.4-14.5 kHz band inside filterAudio()
                    // lies above the Nyquist frequency (4000 Hz), so every bin is
                    // zeroed — this is why the filter appears "not to work" (Q4).
                    // Pass the recorder's real rate (audioRecord.getSampleRate()).
                    filterAudio(bufferReadResult, toTransform, mSampleRates[3]);
                    // NOTE(review): filterAudio() already performs a forward +
                    // inverse FFT internally; this second realForward re-transforms
                    // the filtered time signal into the frequency domain, presumably
                    // for display via publishProgress — confirm that is intended.
                    transformer.realForward(toTransform);                   
                    publishProgress(toTransform);
                }
                audioRecord.stop();
                audioRecord.release();


/**
 * Band-pass filters {@code audioBuffer} in place, keeping only the default
 * band 14400-14500 Hz and zeroing all other frequency content.
 *
 * <p>Answers Q3 as a side note: the frequency represented by FFT bin {@code k}
 * of an {@code n}-point real FFT is {@code k * sampleRate / n} Hz; bins run
 * from 0 (DC) to {@code n/2} (Nyquist, {@code sampleRate/2}).
 *
 * @param bufferSize  number of valid time-domain samples in {@code audioBuffer}
 *                    (must be even and &le; {@code audioBuffer.length})
 * @param audioBuffer time-domain samples on entry; filtered time-domain
 *                    samples on exit
 * @param sampleRate  the rate the samples were captured at, in Hz — pass the
 *                    recorder's actual rate, not a hard-coded constant
 */
public static void filterAudio(int bufferSize, double[] audioBuffer, float sampleRate ){
    filterAudio(bufferSize, audioBuffer, sampleRate, 14400f, 14500f);
}

/**
 * Generalized overload: band-pass filters {@code audioBuffer} in place,
 * keeping only frequencies in {@code [freqMin, freqMax]} Hz.
 *
 * <p>Fixes relative to the original implementation:
 * <ul>
 *   <li>The FFT length must equal the number of time-domain samples
 *       ({@code bufferSize}), not half of it. With {@code bufferSize/2} only
 *       the first half of the signal was transformed, and the zeroing loop
 *       then clobbered the untransformed second half.</li>
 *   <li>The packed JTransforms layout stores Re[n/2] (Nyquist) at index 1;
 *       DC and Nyquist bins have no imaginary part and are handled
 *       separately.</li>
 *   <li>{@code realInverse(..., true)} is used so the round trip is properly
 *       scaled by 1/n; with {@code false} the output was amplified.</li>
 * </ul>
 */
public static void filterAudio(int bufferSize, double[] audioBuffer,
                               float sampleRate, float freqMin, float freqMax) {
    //JTransforms real FFT over bufferSize time-domain samples.
    DoubleFFT_1D mFFT = new DoubleFFT_1D(bufferSize);

    //Take the FFT. Packed layout for even n:
    //audioBuffer[2*k]   = Re[k], 0 <= k < n/2
    //audioBuffer[2*k+1] = Im[k], 0 <  k < n/2
    //audioBuffer[1]     = Re[n/2]  (Nyquist bin, packed where Im[0] would be)
    mFFT.realForward(audioBuffer);

    //Loop over every distinct frequency bin: 0 (DC) .. n/2 (Nyquist).
    int nyquistBin = bufferSize / 2;
    for (int fftBin = 0; fftBin <= nyquistBin; fftBin++) {
        //frequency_of_bin = bin_index * sample_rate / n
        float frequency = (float) fftBin * sampleRate / (float) bufferSize;

        //Keep the band of interest; zero everything else.
        if (frequency < freqMin || frequency > freqMax) {
            if (fftBin == 0) {
                //DC component — real only.
                audioBuffer[0] = 0;
            } else if (fftBin == nyquistBin) {
                //Nyquist component — real only, packed at index 1.
                audioBuffer[1] = 0;
            } else {
                //Ordinary bin: zero both real and imaginary parts.
                audioBuffer[2 * fftBin] = 0;
                audioBuffer[2 * fftBin + 1] = 0;
            }
        }
    }

    //Inverse FFT back to the time domain; scale=true divides by n so the
    //signal keeps its original amplitude.
    mFFT.realInverse(audioBuffer, true);
}

/**
 * Probes combinations of sample rate, PCM encoding and channel configuration
 * until an {@link AudioRecord} initializes successfully.
 *
 * <p>16-bit PCM is tried before 8-bit: it is the only encoding guaranteed to
 * work on all devices, and the processing code reads samples into a
 * {@code short[]} and normalizes them as signed 16-bit values.
 *
 * <p>NOTE(review): reads/writes the fields {@code mSampleRates},
 * {@code bufferSize} and {@code TAG} declared elsewhere in this class.
 *
 * @return an initialized recorder, or {@code null} if no combination works
 */
final AudioRecord findAudioRecord() {
        for (int rate : mSampleRates) {
            for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_16BIT, AudioFormat.ENCODING_PCM_8BIT }) {
                for (short channelConfig : new short[] { AudioFormat.CHANNEL_CONFIGURATION_MONO , AudioFormat.CHANNEL_CONFIGURATION_STEREO }) {
                    try {

                        bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);

                        if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                            //check if we can instantiate and have a success
                            AudioRecord recorder = new AudioRecord(AudioSource.DEFAULT, rate, channelConfig, audioFormat, bufferSize);

                            if (recorder.getState() == AudioRecord.STATE_INITIALIZED){
                                Log.d(TAG, "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: "
                                        + channelConfig);
                                return recorder;
                            }
                            //Bug fix: a failed AudioRecord still holds native
                            //resources — release it, or each unusable
                            //combination leaks until the GC finalizes it.
                            recorder.release();
                        }
                    } catch (Exception e) {
                        Log.e(TAG, rate + "Exception, keep trying.",e);
                    }
                }
            }
        }

        return null;
    }
Informationsquelle: Autor d-man | 2012-06-07
Schreibe einen Kommentar