AudioKit microphone amplitude while playing music
0 votes
/ 02 April 2019

Is it possible to play music while simultaneously analyzing the microphone amplitude?

Currently I can analyze the microphone amplitude as long as I am not playing music. If I play music, the amplitude starts at 0.0, then rises to 7.0+ and stays there indefinitely. This only happens while music is playing; otherwise everything works fine.

Here is my singleton that analyzes the microphone amplitude:

//
//  G8Audiokit.swift

import Foundation
import AudioKit

class G8Audiokit{

    // Variables for audio analysis
    var microphone: AKMicrophone! // Device Microphone
    var amplitudeTracker: AKAmplitudeTracker! // Tracks the amplitude of the microphone
    var signalBooster: AKBooster! // boosts the signal
    var audioAnalysisTimer: Timer? // Continuously calls audioAnalysis function
    let amplitudeBuffSize = 10 // A smaller buffer yields a more responsive but less stable amplitude; a larger one responds more slowly but is smoother
    var amplitudeBuffer: [Double] // This stores a rolling window of amplitude values, used to get average amplitude

    public var onAmplitudeUpdate: ((_ value: Float) -> ())?

    static let sharedInstance = G8Audiokit()

    private init() { // private so the class can only be instantiated through sharedInstance
        self.amplitudeBuffer = [Double](repeating: 0.0, count: amplitudeBuffSize)
        startAudioAnalysis()
    }

    /**
     Set up AudioKit Processing Pipeline and start the audio analysis.
     */
    func startAudioAnalysis(){

        stopAudioAnalysis()

        // Settings
        AKSettings.bufferLength = .medium // Sets the audio signal buffer size
        do {
            try AKSettings.setSession(category:.playAndRecord)
        } catch {
            AKLog("Could not set session category.")
        }

        // ----------------
        // Input + Pipeline

        // Initialize the built-in Microphone
        microphone = AKMicrophone()

        // Pre-processing
        signalBooster = AKBooster(microphone)
        signalBooster.gain = 5.0 // Video recording boosts the signal by the equivalent of 5.0, so we match that here and drop it to 1.0 when video recording starts

        // Filter out anything outside human voice range
        let highPass = AKHighPassFilter(signalBooster, cutoffFrequency: 55) // Lowered this a bit to be more sensitive to bass-drums
        let lowPass = AKLowPassFilter(highPass, cutoffFrequency: 255)
        //  At this point you don't have much signal left, so you balance it against the original signal!
        let rebalanced = AKBalancer(lowPass, comparator: signalBooster)

        // Track the amplitude of the rebalanced signal, we use this value for audio reactivity
        amplitudeTracker = AKAmplitudeTracker(rebalanced)

        // Mute the audio that gets routed to the device output, preventing feedback
        let silence = AKBooster(amplitudeTracker, gain:0)

        // We need to complete the chain, routing silenced audio to the output
        AudioKit.output = silence

        // Start the chain and timer callback
        do { try AudioKit.start() }
        catch { AKLog("AudioKit failed to start: \(error)") }

        audioAnalysisTimer = Timer.scheduledTimer(timeInterval: 0.01,
                                                  target: self,
                                                  selector: #selector(audioAnalysis),
                                                  userInfo: nil,
                                                  repeats: true)
        // Add the timer to the main run loop in common modes so UI tracking (e.g. scrolling) doesn't stall it
        RunLoop.main.add(audioAnalysisTimer!, forMode: RunLoopMode.commonModes)
    }

    // Call this when closing the app or going to background
    public func stopAudioAnalysis(){
        audioAnalysisTimer?.invalidate()
        AudioKit.disconnectAllInputs() // Disconnect all AudioKit components, so they can be relinked when we call startAudioAnalysis()
    }

    // This is called on the audioAnalysisTimer
    @objc func audioAnalysis(){
        writeToBuffer(val: amplitudeTracker.amplitude) // Write an amplitude value to the rolling buffer
        let val = getBufferAverage()
        onAmplitudeUpdate?(Float(val))

    }

    // Writes an amplitude value to the front of the rolling window buffer and drops the oldest value, preserving the buffer length
    func writeToBuffer(val: Double) {
        amplitudeBuffer.insert(val, at: 0)
        amplitudeBuffer.removeLast()
    }

    // Returns the average of amplitudeBuffer, yielding a smoother audio-reactivity signal
    func getBufferAverage() -> Double {
        let sum = amplitudeBuffer.reduce(0.0, +)
        return sum / Double(amplitudeBuffer.count)
    }



}
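
For reference, a minimal sketch of how the amplitude callback can be consumed (simplified, not my actual consumer):

// Hypothetical call site: subscribe to the smoothed amplitude updates
G8Audiokit.sharedInstance.onAmplitudeUpdate = { value in
    // `value` is the rolling-window average amplitude as a Float
    print("mic amplitude: \(value)")
}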

And here is how I play music in another singleton:

func playAudioFile(mp3FileName: String!, loopForEver: Bool = false) {
        guard let url = Bundle.main.url(forResource: mp3FileName, withExtension: "mp3") else { return }

        do {
            try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)
            try AVAudioSession.sharedInstance().setActive(true)

            // For iOS 11
            objPlayer = try AVAudioPlayer(contentsOf: url, fileTypeHint: AVFileType.mp3.rawValue)
            if (loopForEver){
                objPlayer?.numberOfLoops = -1
            }

            // For iOS versions < 11
            //objPlayer = try AVAudioPlayer(contentsOf: url, fileTypeHint: AVFileTypeMPEGLayer3)

            guard let aPlayer = objPlayer else { return }
            aPlayer.play()

        } catch let error {
            print(error.localizedDescription)
        }
    }
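
A hypothetical call to this method would look like the following (the file name here is just a placeholder, not a real resource):

// Hypothetical usage; "menuTrack" stands in for an mp3 in the main bundle
playAudioFile(mp3FileName: "menuTrack", loopForEver: true)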

I have a feeling that:

try AKSettings.setSession(category:.playAndRecord)

in my AudioKit implementation may be conflicting with:

try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)

Could this be the cause? Would you recommend a different way to achieve this?
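
One direction I have considered, as a rough sketch: route the music through AudioKit itself with AKPlayer, so playback and the microphone share a single playAndRecord session and a single engine. Here "song.mp3" is a placeholder file name, and silence is the muted mic chain from startAudioAnalysis above:

do {
    // One shared session for both recording and playback
    try AKSettings.setSession(category: .playAndRecord, with: [.defaultToSpeaker])

    // Play the music inside the same AudioKit graph instead of a separate AVAudioPlayer
    let file = try AKAudioFile(readFileName: "song.mp3")
    let player = AKPlayer(audioFile: file)
    player.isLooping = true

    // Mix the music with the silenced analysis chain so both reach the output
    AudioKit.output = AKMixer(player, silence)
    try AudioKit.start()
    player.play()
} catch {
    AKLog("Audio setup failed: \(error)")
}

Even with a single session, the microphone will still pick up whatever comes out of the speaker, so the tracker may need headphones or a threshold to stay insensitive to the music.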

...