import 'dart:async';
import 'dart:convert';
import 'dart:math';
import 'dart:typed_data';

import 'package:demo001/tools/audio_tool.dart';
import 'package:demo001/xunfei/recognition_result/recognition_result.dart';
import 'package:demo001/xunfei/streamtrans_result/streamtrans_result.dart';
import 'package:demo001/xunfei/utils.dart';
import 'package:intl/intl.dart';
import 'package:web_socket_channel/web_socket_channel.dart';

// Xunfei simultaneous interpretation (translation) client.
class XunFeiTranslate {
  final int _chunkSize = 1280; // Bytes sent per frame.

  // Volume thresholds.
  final double _speakingThreshold = 50.0; // RMS level that marks speech onset.
  final double _silenceThreshold = 30.0; // RMS level that counts as silence.
  final Duration _silenceDuration =
      Duration(seconds: 1); // How long the volume must stay low to end speech.
  DateTime? _lastBelowThresholdTime; // When the volume last dropped below the threshold.
  double _volume = 0; // Current volume (RMS).

  Uint8List _buff = Uint8List(0); // Audio buffer.
  bool _isrecord = false; // Whether recording is active.
  bool _isspeaking = false; // Whether the user is currently speaking.
  Timer? _timer;
  XunFeiTranslateTask? currtask;

  late Function(RecognitionResult) onRecognitionResult;
  late Function(StreamtransResult) onStreamtransResult;
  late Function(AudioModel) onAudioResult;

  final String appId;
  final String apiKey;
  final String apiSecret;
  final String host = "ws-api.xf-yun.com";
  final String requestUri = "/v1/private/simult_interpretation";

  // Static field holding the single instance.
  static XunFeiTranslate? _instance;

  XunFeiTranslate._internal(
      {required this.appId,
      required this.apiKey,
      required this.apiSecret,
      required this.onRecognitionResult,
      required this.onStreamtransResult,
      required this.onAudioResult});

  // Factory constructor (singleton).
  factory XunFeiTranslate({
    required String appId,
    required String apiKey,
    required String apiSecret,
    required Function(RecognitionResult) onRecognitionResult,
    required Function(StreamtransResult) onStreamtransResult,
    required Function(AudioModel) onAudioResult,
  }) {
    _instance ??= XunFeiTranslate._internal(
      appId: appId,
      apiKey: apiKey,
      apiSecret: apiSecret,
      onRecognitionResult: onRecognitionResult,
      onStreamtransResult: onStreamtransResult,
      onAudioResult: onAudioResult,
    );
    return _instance!;
  }

  // Build the signed WebSocket URL.
  String _geturl() {
    final now = DateTime.now();
    final date =
        DateFormat("EEE, dd MMM yyyy HH:mm:ss 'GMT'").format(now.toUtc());
    final signatureOrigin =
        "host: $host\ndate: $date\nGET $requestUri HTTP/1.1";
    // Compute the HMAC-SHA256 signature over the origin string.
    final signature = XunfeiUtils.hmacSha256(apiSecret, signatureOrigin);
    final authorization = base64.encode(utf8.encode(
        "api_key=\"$apiKey\", algorithm=\"hmac-sha256\", headers=\"host date request-line\", signature=\"$signature\""));
    final queryParams = {
      "authorization": authorization,
      "date": date,
      "host": host,
      "serviceId": "simult_interpretation"
    };
    final wsUri =
        'ws://$host$requestUri?${Uri(queryParameters: queryParams).query}';
    return wsUri;
  }

  // Build the request payload for one audio frame.
  // status: 0 = first frame, 1 = intermediate frame, 2 = last frame.
  Map<String, dynamic> _createParams(int status, Uint8List audio) {
    final param = {
      "header": {
        "app_id": appId,
        "status": status,
      },
      "parameter": {
        "ist": {
          "accent": "mandarin",
          "domain": "ist_ed_open",
          "language": "zh_cn",
          "vto": 15000,
          "eos": 150000
        },
        "streamtrans": {"from": "cn", "to": "en"},
        "tts": {
          "vcn": "x2_catherine",
          "tts_results": {
            "encoding": "raw",
            "sample_rate": 16000,
            "channels": 1,
            "bit_depth": 16,
            "frame_size": 0
          }
        }
      },
      "payload": {
        "data": {
          "audio": base64.encode(audio),
          "encoding": "raw",
          "sample_rate": 16000,
          "seq": 1,
          "status": status
        }
      }
    };
    return param;
  }
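  // Note on pacing (follows from the PCM format declared in _createParams:
  // 16 kHz sample rate, 16-bit depth, mono): 16000 samples/s * 2 bytes =
  // 32000 bytes/s, so one 1280-byte chunk holds exactly 40 ms of audio.
  // That is why starttranslate below drains one chunk every 40 ms.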
  // Start simultaneous translation.
  Future<void> starttranslate(Stream<Uint8List> stream) async {
    _isrecord = true;
    stream.listen((data) {
      if (_isrecord) {
        _buff = _appendToBuffer(data);
      }
    });
    _timer = Timer.periodic(Duration(milliseconds: 40), (timer) async {
      // Drain one frame from the buffer every 40 ms.
      var frame = _getAudioData();
      _volume = _calculateAmplitude(frame);
      var state = _checkSpeakingStatus();
      if (state == 1) {
        // Speech started: open a new task and send the first frame.
        currtask = XunFeiTranslateTask(_geturl(), _handleData);
        currtask?.sendaudio(_createParams(0, frame));
        print("Sent first frame ---------------------------");
      } else if (state == 2) {
        // Still speaking: send an intermediate frame.
        currtask?.sendaudio(_createParams(1, frame));
      } else if (state == 3) {
        // Speech ended: send the last frame.
        currtask?.sendaudio(_createParams(2, frame));
        print("Sent last frame ---------------------------");
      }
    });
    return;
  }

  // Stop translation.
  Future<void> stoptranslate() async {
    _isrecord = false;
    _timer?.cancel();
    _timer = null;
    if (currtask != null) {
      var frame = _getAudioData();
      currtask?.sendaudio(_createParams(2, frame));
      print("Sent last frame ---------------------------");
      currtask = null;
    }
    _isspeaking = false;
    _lastBelowThresholdTime = null;
    _buff = Uint8List(0);
    return;
  }

  // Append incoming audio to the buffer.
  Uint8List _appendToBuffer(Uint8List newData) {
    var newBuffer = Uint8List(_buff.length + newData.length);
    newBuffer.setAll(0, _buff);
    newBuffer.setAll(_buff.length, newData);
    return newBuffer;
  }

  // Read one frame from the buffer.
  Uint8List _getAudioData() {
    if (_buff.length >= _chunkSize) {
      // Take _chunkSize (1280) bytes from the front of the buffer...
      var data = _buff.sublist(0, _chunkSize);
      // ...and drop them from the buffer.
      _buff = _buff.sublist(_chunkSize);
      return data;
    } else {
      // Less than one full frame buffered: return what is there and clear
      // the buffer so the same bytes are not sent twice.
      var data = _buff;
      _buff = Uint8List(0);
      return data;
    }
  }

  // Compute the current volume as the RMS of the 16-bit little-endian samples.
  double _calculateAmplitude(Uint8List data) {
    if (data.length < 2) return 0; // Not enough bytes for a single sample.
    Int16List samples = Int16List(data.length ~/ 2);
    ByteData byteData = ByteData.view(data.buffer);
    for (int i = 0; i < samples.length; i++) {
      samples[i] = byteData.getInt16(i * 2, Endian.little);
    }
    double sum = 0;
    for (int sample in samples) {
      sum += (sample * sample);
    }
    double rms = sqrt(sum / samples.length);
    return rms;
  }
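  // The two thresholds form a hysteresis band: speech starts only when the
  // RMS volume rises above _speakingThreshold (50), and ends only after it
  // stays below _silenceThreshold (30) for _silenceDuration (1 s). Volumes
  // between the two thresholds change nothing, which keeps brief dips or
  // spikes from toggling the state.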
  // Classify the current speaking state.
  // Returns: 0 = idle, 1 = speech just started, 2 = speech continuing,
  // 3 = speech just ended.
  int _checkSpeakingStatus() {
    if (_volume > _speakingThreshold && !_isspeaking) {
      // Volume rose above the onset threshold: speech starts.
      _isspeaking = true;
      _lastBelowThresholdTime = null;
      return 1;
    } else if (_isspeaking && _volume < _silenceThreshold) {
      // Volume is below the silence threshold while speaking.
      if (_lastBelowThresholdTime == null) {
        // Record the first time it dropped below the threshold.
        _lastBelowThresholdTime = DateTime.now();
      } else if (DateTime.now().difference(_lastBelowThresholdTime!) >
          _silenceDuration) {
        // Volume stayed low for long enough: speech ends. Reset the timer
        // so state 3 is reported only once per utterance.
        _isspeaking = false;
        _lastBelowThresholdTime = null;
        return 3;
      }
    } else {
      // Volume recovered above the silence threshold: reset the timer.
      _lastBelowThresholdTime = null;
    }
    return _isspeaking ? 2 : 0;
  }

  // Handle a parsed result message from the server.
  void _handleData(dynamic json) {
    final status = json['header']['status'];
    final sid = json['header']['sid'];
    var payload = json['payload'];
    if (payload != null) {
      payload = json['payload'] as Map;
      // Recognition (speech-to-text) result.
      if (payload.containsKey('recognition_results')) {
        final model = RecognitionResult.fromJson(json);
        if (model.payload?.recognitionResults?.text == null ||
            model.payload?.recognitionResults?.text?.trim() == '') return;
        onRecognitionResult(model);
        // Translation result.
      } else if (payload.containsKey('streamtrans_results')) {
        final model = StreamtransResult.fromJson(json);
        if (model.payload?.streamtransResults?.text == null ||
            model.payload?.streamtransResults?.text?.trim() == '') return;
        onStreamtransResult(model);
        // Synthesized (TTS) audio.
      } else if (payload.containsKey('tts_results')) {
        final bytes = base64Decode(payload['tts_results']['audio']);
        if (bytes.isEmpty) return;
        onAudioResult(AudioModel(status: status, sid: sid, data: bytes));
      }
    }
    // status == 2 marks the end of the task; the socket itself is closed in
    // XunFeiTranslateTask.onMessage.
  }
}

// A single Xunfei translation session over one WebSocket connection.
class XunFeiTranslateTask {
  late WebSocketChannel _channel;
  bool isconnected = false;
  late Function(dynamic) handle;

  XunFeiTranslateTask(String url, Function(dynamic) handle) {
    this.handle = handle; // Assign before listening so callbacks can fire.
    _channel = WebSocketChannel.connect(Uri.parse(url));
    // Listen on the timed-out stream so the 10 s limit actually applies:
    // a TimeoutException is raised if the server stays silent that long.
    _channel.stream.timeout(Duration(seconds: 10)).listen(
      (message) {
        onMessage(message);
      },
      onError: (error) {
        isconnected = false;
        print('Connection failed: $error');
      },
      onDone: () {
        isconnected = false;
        print('WebSocket connection closed');
        print('Close code: ${_channel.closeCode}');
        print('Close reason: ${_channel.closeReason}');
      },
      cancelOnError: true,
    );
    isconnected = true;
  }

  Future<void> sendaudio(Map<String, dynamic> data) async {
    if (isconnected) {
      _channel.sink.add(json.encode(data));
    }
  }

  Future<void> onMessage(dynamic message) async {
    try {
      // Parse the result and forward it to the handler.
      var messageMap = json.decode(message);
      var status = messageMap["header"]["status"];
      handle(messageMap);
      if (status == 2) {
        print("Task finished! ------------------------------------------");
        _channel.sink.close();
      }
    } catch (e) {
      print("Error handling received message: $e");
    }
    return;
  }
}
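// ---------------------------------------------------------------------------
// Usage sketch. Everything below is illustrative, not part of this file's
// API: `micStream` stands for a 16 kHz / 16-bit / mono PCM microphone stream
// (e.g. from a recorder plugin), and the credential strings are placeholders.
//
//   final translator = XunFeiTranslate(
//     appId: '<APP_ID>',
//     apiKey: '<API_KEY>',
//     apiSecret: '<API_SECRET>',
//     onRecognitionResult: (r) =>
//         print('asr: ${r.payload?.recognitionResults?.text}'),
//     onStreamtransResult: (r) =>
//         print('trans: ${r.payload?.streamtransResults?.text}'),
//     onAudioResult: (a) {
//       // a.data holds raw 16 kHz / 16-bit PCM from the TTS engine.
//     },
//   );
//   await translator.starttranslate(micStream);
//   // ... speak ...
//   await translator.stoptranslate();
// ---------------------------------------------------------------------------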