ios speech recognition Error Domain=kAFAssistantErrorDomain Code=216 "(null)"

10,201

Solution 1

I had the same problem whilst following the same (excellent) tutorial, even when using the example code on GitHub. To solve it, I had to do two things:

Firstly, add request.endAudio() at the start of the code to stop recording in the startButtonTapped action. This marks the end of the recording. I see you've already done that in your sample code.

Secondly, in the recordAndRecognizeSpeech function, when 'recognitionTask' is started, if no speech was detected then 'result' will be nil and the error case is triggered. So, I tested for result != nil before attempting to assign the result.

So, the code for those two functions looks as follows: 1. Updated startButtonTapped:

/// Toggles speech capture when the start button is tapped: starts
/// recording/recognition when idle, tears everything down when recording.
@IBAction func startButtonTapped(_ sender: UIButton) {
    guard isRecording else {
        // Idle → begin capturing audio and recognizing speech.
        recordAndRecognizeSpeech()
        isRecording = true
        startButton.backgroundColor = .red
        return
    }

    // Recording → shut everything down.
    request.endAudio() // Mark the end of the recording so recognition can complete.
    audioEngine.stop()

    // inputNode was optional in early Speech SDKs; unwrap before removing the tap.
    if let node = audioEngine.inputNode {
        node.removeTap(onBus: 0)
    }
    recognitionTask?.cancel()

    isRecording = false
    startButton.backgroundColor = .gray
}

And 2. Update within recordAndRecognizeSpeech from the recognitionTask = ... line:

    // Fix: in the original, the `else if let error` branch was nested inside
    // `if result != nil`, which made it unreachable — genuine recognition
    // failures were silently swallowed together with the spurious "no speech
    // detected" case. Restructured so a nil result (the trigger of
    // kAFAssistantErrorDomain Code=216) is skipped without alerting the user,
    // while real errors are still logged for debugging.
    recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { (result, error) in
        guard let result = result else {
            // No transcription (e.g. no speech was detected, or the task
            // ended); log any accompanying error but do not alert the user.
            if let error = error {
                print(error)
            }
            return
        }
        let bestString = result.bestTranscription.formattedString
        self.detectedTextLabel.text = bestString

        // Walk the segments so `lastString` ends up holding only the most
        // recently spoken word.
        var lastString: String = ""
        for segment in result.bestTranscription.segments {
            let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
            lastString = bestString.substring(from: indexTo)
        }
        self.checkForColoursSaid(resultString: lastString)
    })

I hope that helps you.

Solution 2

This will prevent two errors: The above mentioned Code=216 and the 'SFSpeechAudioBufferRecognitionRequest cannot be re-used' error.

  1. Stop recognition with finish not with cancel

  2. Stop audio

like so:

    // 1. Stop recognition gracefully: finish() lets the task deliver its
    //    final result, whereas cancel() discards it — the answer above states
    //    this is what avoids the Code=216 error.
    recognitionTask?.finish()
    recognitionTask = nil

    // 2. Stop audio: signal end-of-input first so the buffer request is not
    //    re-used, then halt the engine and detach the microphone tap.
    request.endAudio()
    audioEngine.stop()
    audioEngine.inputNode.removeTap(onBus: 0) // Remove tap on bus when stopping recording.

P.S. audioEngine.inputNode no longer appears to be an optional value, so I did not use an if let construct.

Solution 3

Hey, I was getting the same error, but now it's working absolutely fine. I hope this code helps you too :).

import UIKit
import Speech

/// View controller that records microphone audio with AVAudioEngine and
/// transcribes it live into `slabel` using the Speech framework.
class SpeechVC: UIViewController {

    /// Label that displays the live transcription.
    @IBOutlet weak var slabel: UILabel!
    /// Button that toggles recording.
    @IBOutlet weak var sbutton: UIButton!

    let audioEngine = AVAudioEngine()
    // NOTE(review): property should be lowerCamelCase (`speechRecognizer`);
    // name kept as-is so existing references keep working.
    let SpeechRecognizer : SFSpeechRecognizer? = SFSpeechRecognizer()
    let request = SFSpeechAudioBufferRecognitionRequest()
    var recognitionTask:SFSpeechRecognitionTask?

    /// True while the tap is installed and the engine is running.
    var isRecording = false

    override func viewDidLoad() {
        super.viewDidLoad()

        // Ask for speech-recognition permission up front.
        self.requestSpeechAuthorization()
    }

    /// Installs a tap on the microphone input, starts the audio engine and
    /// feeds the captured buffers into a speech-recognition task that updates
    /// `slabel` with the best transcription so far.
    func recordAndRecognizeSpeech()
    {
        guard let node = audioEngine.inputNode else { return }
        let recordingFormat = node.outputFormat(forBus: 0)
        node.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer , _ in
            self.request.append(buffer)
        }
        audioEngine.prepare()
        do
        {
            try audioEngine.start()
        }
        catch
        {
            return print(error)
        }
        // Fix: check availability on the recognizer we actually use, instead
        // of allocating a throwaway SFSpeechRecognizer() just for the check.
        guard let recognizer = SpeechRecognizer, recognizer.isAvailable else {
            return
        }
        recognitionTask = recognizer.recognitionTask(with: request, resultHandler: { result, error in
            if let result = result
            {
                // Show the best transcription so far.
                // Fix: the original also built a `lastString` from the
                // transcription segments but never used it; that dead loop
                // has been removed.
                self.slabel.text = result.bestTranscription.formattedString
            }
            else if let error = error
            {
                print(error)
            }
        })
    }

    /// Toggles recording and updates the button colour to reflect the state.
    @IBAction func startAction(_ sender: Any) {
        if isRecording == true
        {
            // Fix: signal end-of-audio and remove the microphone tap when
            // stopping. The original only stopped the engine and cancelled
            // the task, which leaves the tap installed and the request open —
            // the conditions behind the Code=216 / "request cannot be
            // re-used" errors this page discusses.
            request.endAudio()
            cancelRecording()
            isRecording = false
            sbutton.backgroundColor = UIColor.gray
        }
        else {
            self.recordAndRecognizeSpeech()
            isRecording = true
            sbutton.backgroundColor = UIColor.red
        }
    }

    /// Stops the audio engine, removes the microphone tap and cancels the
    /// in-flight recognition task, if any.
    func cancelRecording()
    {
        audioEngine.stop()
        if let node = audioEngine.inputNode
        {
            // Fix: use the bound `node`; the original re-read the optional
            // `audioEngine.inputNode?` and ignored the binding entirely.
            node.removeTap(onBus: 0)
        }
        recognitionTask?.cancel()
    }

    /// Requests speech-recognition authorization and enables/disables the
    /// record button (with an explanatory label) on the main queue.
    func requestSpeechAuthorization()
    {
        SFSpeechRecognizer.requestAuthorization { authStatus in
            OperationQueue.main.addOperation {
                switch authStatus
                {
                case .authorized :
                    self.sbutton.isEnabled = true
                case .denied :
                    self.sbutton.isEnabled = false
                    self.slabel.text = "User denied access to speech recognition"
                case .restricted :
                    self.sbutton.isEnabled = false
                    self.slabel.text = "Speech Recognition is restricted on this Device"
                case .notDetermined :
                    self.sbutton.isEnabled = false
                    self.slabel.text = "Speech Recognition not yet authorized"
                }
            }
        }
    }
}

Solution 4

I had this error because I was running the app on the Simulator. Running on a regular device solves the issue.

Share:
10,201
Peizheng Ma
Author by

Peizheng Ma

Updated on June 05, 2022

Comments

  • Peizheng Ma
    Peizheng Ma almost 2 years

    Basically I am learning ios speech recognition module following this tutorial: https://medium.com/ios-os-x-development/speech-recognition-with-swift-in-ios-10-50d5f4e59c48

    But when I test it on my iphone6, I always got this error: Error Domain=kAFAssistantErrorDomain Code=216 "(null)"

    I searched it on the internet, but find very rare info about this.

    Here is my code:

    //
    //  ViewController.swift
    //  speech_sample
    //
    //  Created by Peizheng Ma on 6/22/17.
    //  Copyright © 2017 Peizheng Ma. All rights reserved.
    //
    
    import UIKit
    import AVFoundation
    import Speech
    
    // Question code (asker: Peizheng Ma), quoted verbatim: records microphone
    // audio via AVAudioEngine and transcribes it with SFSpeechRecognizer.
    // On-device testing produced Error Domain=kAFAssistantErrorDomain
    // Code=216 "(null)" — the subject of this page.
    class ViewController: UIViewController, SFSpeechRecognizerDelegate {
    
    //MARK: speech recognize variables
    // Audio capture engine; its input node is tapped to feed the request.
    let audioEngine = AVAudioEngine()
    // Recognizer fixed to the en-US locale; nil if that locale is unsupported.
    let speechRecognizer: SFSpeechRecognizer? = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US"))
    // Buffer-based request that microphone buffers are appended to.
    var request = SFSpeechAudioBufferRecognitionRequest()
    // Live recognition task, if any; torn down when recording stops.
    var recognitionTask: SFSpeechRecognitionTask?
    // Tracks whether the tap/engine are currently running.
    var isRecording = false
    
    override func viewDidLoad() {
        // NOTE(review): super.viewDidLoad() is commented out — it should
        // normally be called before any additional setup.
        // super.viewDidLoad()
        // get Authorization
        self.requestSpeechAuthorization()
    }
    
    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
    
    //MARK: properties
    @IBOutlet weak var detectText: UILabel!
    @IBOutlet weak var startButton: UIButton!
    
    //MARK: actions
    // Toggles recording: the stop path tears down engine, tap, request and
    // task; the start path begins capture + recognition.
    @IBAction func startButtonTapped(_ sender: UIButton) {
        if isRecording == true {
    
    
            audioEngine.stop()
    //            if let node = audioEngine.inputNode {
    //                node.removeTap(onBus: 0)
    //            }
            audioEngine.inputNode?.removeTap(onBus: 0)
            // Indicate that the audio source is finished and no more audio will be appended
            self.request.endAudio()
    
            // Cancel the previous task if it's running
            // NOTE(review): cancel() discards the pending final result; the
            // accepted answers above suggest finish() (and checking for a nil
            // result in the handler) to avoid the Code=216 error — verify.
            if let recognitionTask = recognitionTask {
                recognitionTask.cancel()
                self.recognitionTask = nil
            }
    
    
            //recognitionTask?.cancel()
            //self.recognitionTask = nil
            isRecording = false
            startButton.backgroundColor = UIColor.gray
        } else {
            self.recordAndRecognizeSpeech()
            isRecording = true
            startButton.backgroundColor = UIColor.red
        }
    }
    
    //MARK: show alert
    // Presents a one-button alert on the main queue.
    func showAlert(title: String, message: String, handler: ((UIAlertAction) -> Swift.Void)? = nil) {
        DispatchQueue.main.async { [unowned self] in
            let alertController = UIAlertController(title: title, message: message, preferredStyle: .alert)
            alertController.addAction(UIAlertAction(title: "OK", style: .cancel, handler: handler))
            self.present(alertController, animated: true, completion: nil)
        }
    }
    
    //MARK: Recognize Speech
    // Installs the input tap, starts the engine, then starts a recognition
    // task that streams the best transcription into `detectText`.
    func recordAndRecognizeSpeech() {
        // Setup Audio Session
        guard let node = audioEngine.inputNode else { return }
        let recordingFormat = node.outputFormat(forBus: 0)
        node.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer, _ in
            self.request.append(buffer)
        }
        audioEngine.prepare()
        do {
            try audioEngine.start()
        } catch {
            self.showAlert(title: "SpeechNote", message: "There has been an audio engine error.", handler: nil)
            return print(error)
        }
        guard let myRecognizer = SFSpeechRecognizer() else {
            self.showAlert(title: "SpeechNote", message: "Speech recognition is not supported for your current locale.", handler: nil)
            return
        }
        if !myRecognizer.isAvailable {
            self.showAlert(title: "SpeechNote", message: "Speech recognition is not currently available. Check back at a later time.", handler: nil)
            // Recognizer is not available right now
            return
        }
        recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { result, error in
            if let result = result {
    
                let bestString = result.bestTranscription.formattedString
                self.detectText.text = bestString
    
    //                var lastString: String = ""
    //                for segment in result.bestTranscription.segments {
    //                    let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
    //                    lastString = bestString.substring(from: indexTo)
    //                }
    //                self.checkForColorsSaid(resultString: lastString)
            } else if let error = error {
                // NOTE(review): `result` is nil when the task is cancelled or
                // no speech was detected, so this branch surfaces the
                // Code=216 error as an alert — presumably the behavior the
                // question is asking about; confirm against the answers above.
                self.showAlert(title: "SpeechNote", message: "There has been a speech recognition error.", handler: nil)
                print(error)
            }
        })
    }
    
    //MARK: - Check Authorization Status
    // Requests authorization and updates the button/label on the main queue.
    func requestSpeechAuthorization() {
        SFSpeechRecognizer.requestAuthorization { authStatus in
            OperationQueue.main.addOperation {
                switch authStatus {
                case .authorized:
                    self.startButton.isEnabled = true
                case .denied:
                    self.startButton.isEnabled = false
                    self.detectText.text = "User denied access to speech recognition"
                case .restricted:
                    self.startButton.isEnabled = false
                    self.detectText.text = "Speech recognition restricted on this device"
                case .notDetermined:
                    self.startButton.isEnabled = false
                    self.detectText.text = "Speech recognition not yet authorized"
                }
            }
        }
    }
    
    
    }
    

    Thank you very much.

  • Noel
    Noel over 3 years
    While this answer seems almost too simple, it actually does fix both errors it claims to fix!
  • Ali123
    Ali123 almost 3 years
    THANKS. Not sure why I expected the simulator to transcribe the text when it most likely does not have all the voice recognition modules. Online transcription works fine even on the simulator.