Basic Speech Recognition

Important information about the microphone and speech recognition in Xcode:

  1. In TARGETS → your app's name → Signing & Capabilities → App Sandbox: check Audio Input.

  2. Then in TARGETS → YourApp → Info, add these two entries:

  Key                                            | Type   | Value
  Privacy – Microphone Usage Description         | String | The microphone is needed for dictation
  Privacy – Speech Recognition Usage Description | String | Speech recognition is needed

Xcode automatically converts them into (a quick runtime check is sketched just after this list):

  • NSMicrophoneUsageDescription
  • NSSpeechRecognitionUsageDescription
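
If you want to confirm at runtime that both usage descriptions actually ended up in the built Info.plist, a minimal sketch (not part of the gist's code) can read them back from the bundle using the raw key names above:

import Foundation

// Illustrative check: print the privacy strings from the app bundle.
// "missing" means the corresponding Info key was not set.
let micDescription = Bundle.main.object(forInfoDictionaryKey: "NSMicrophoneUsageDescription") as? String
let speechDescription = Bundle.main.object(forInfoDictionaryKey: "NSSpeechRecognitionUsageDescription") as? String
print("Microphone usage description:", micDescription ?? "missing")
print("Speech recognition usage description:", speechDescription ?? "missing")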

Notes:

  • Do not edit an Info.plist by hand, especially if it does not exist as a physical file.
  • When the application launches, grant the required permissions (a sketch of requesting them in code follows this list).
  • This code can be the starting point for a more complex application.
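
As a minimal sketch of that permissions step (assuming AVCaptureDevice handles the microphone prompt on your target; the requestPermissions helper name is hypothetical), both authorizations can be requested explicitly at startup so the system dialogs appear right away:

import AVFoundation
import Speech

// Hypothetical helper: request speech recognition and microphone access up front,
// so the prompts appear at launch rather than on the first press of "Start".
func requestPermissions() {
    SFSpeechRecognizer.requestAuthorization { status in
        print("Speech recognition authorization status:", status.rawValue)
    }
    AVCaptureDevice.requestAccess(for: .audio) { granted in
        print("Microphone access granted:", granted)
    }
}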
// ContentView
import SwiftUI

struct ContentView: View {
    @StateObject private var speechRecognizer = SpeechRecognizer()
    @State private var isListening = false
    @State private var userText = ""

    var body: some View {
        VStack(spacing: 16) {
            Text("Speech recognition")
                .font(.headline)

            TextEditor(text: $userText)
                .frame(minHeight: 220)
                .padding(8)
                .overlay(
                    RoundedRectangle(cornerRadius: 12)
                        .stroke(.gray.opacity(0.3), lineWidth: 1)
                )
                .background(.thinMaterial)
                .cornerRadius(12)
                .font(.title)

            HStack(spacing: 16) {
                Button(isListening ? "Stop" : "Start") {
                    if isListening {
                        speechRecognizer.stopTranscription()
                        isListening = false
                    } else {
                        speechRecognizer.startTranscription()
                        isListening = true
                    }
                }
                .foregroundColor(isListening ? .red : .blue)

                Button("Clear") {
                    userText = ""
                    speechRecognizer.partialResult = ""
                    speechRecognizer.finalResult = ""
                }
                .disabled(isListening)
            }
        }
        .padding()
        .onReceive(speechRecognizer.$partialResult) { partial in
            // Option 2: live update (replaces the text while listening).
            // Enable this only if that is really what you want.
            guard isListening else { return }
            guard !partial.isEmpty else { return }
            userText = partial
        }
    }
}
// SpeechRecognizer
import Foundation
import Speech
import AVFoundation
import Combine

final class SpeechRecognizer: ObservableObject {
    // French speech recognition; change the locale identifier for other languages.
    private var speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "fr-FR"))
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private var audioEngine = AVAudioEngine()

    @Published var partialResult: String = ""
    @Published var finalResult: String = ""

    func startTranscription() {
        finalResult = ""
        partialResult = ""
        SFSpeechRecognizer.requestAuthorization { authStatus in
            DispatchQueue.main.async {
                switch authStatus {
                case .authorized:
                    self.startRecognitionSession()
                case .denied, .restricted, .notDetermined:
                    print("Speech recognition authorization denied")
                @unknown default:
                    print("Unknown authorization status")
                }
            }
        }
    }

    private func startRecognitionSession() {
        if audioEngine.isRunning {
            stopTranscription()
        }

        let request = SFSpeechAudioBufferRecognitionRequest()
        request.shouldReportPartialResults = true
        recognitionRequest = request

        let inputNode = audioEngine.inputNode

        recognitionTask = speechRecognizer?.recognitionTask(with: request) { [weak self] result, error in
            guard let self = self else { return }
            // Publish updates on the main thread: the result handler is not guaranteed to run there.
            DispatchQueue.main.async {
                if let result = result {
                    self.partialResult = result.bestTranscription.formattedString
                    if result.isFinal {
                        self.finalResult = result.bestTranscription.formattedString
                    }
                }
                if let error = error {
                    print("Recognition error: \(error.localizedDescription)")
                    self.stopTranscription()
                }
            }
        }

        // Feed microphone buffers into the recognition request.
        let format = inputNode.outputFormat(forBus: 0)
        inputNode.removeTap(onBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: format) { [weak self] buffer, _ in
            self?.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()
        do {
            try audioEngine.start()
        } catch {
            print("Failed to start the audio engine: \(error.localizedDescription)")
        }
    }

    func stopTranscription() {
        if audioEngine.isRunning {
            audioEngine.stop()
            audioEngine.inputNode.removeTap(onBus: 0)
            recognitionRequest?.endAudio()
        }
        // Give the recognizer a short moment to deliver the final result before tearing down.
        DispatchQueue.main.asyncAfter(deadline: .now() + 0.6) {
            self.recognitionTask?.cancel()
            self.recognitionTask = nil
            self.recognitionRequest = nil
        }
    }
}
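
To try the view as a standalone app, a minimal entry point could look like the following sketch (the BasicSpeechApp name is hypothetical; any @main App that shows ContentView works):

import SwiftUI

// Hypothetical app entry point that puts ContentView in a window.
@main
struct BasicSpeechApp: App {
    var body: some Scene {
        WindowGroup {
            ContentView()
        }
    }
}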