Вы не можете выбрать более 25 тем Темы должны начинаться с буквы или цифры, могут содержать дефисы(-) и должны содержать не более 35 символов.
 
 
 
 
 
 

274 строки
8.7 KiB

import 'dart:convert';
import 'dart:io';

import 'package:crypto/crypto.dart';
import 'package:demo001/plugin/xunfei/audiotranslate/result_audio.dart';
import 'package:demo001/plugin/xunfei/audiotranslate/result_test.dart';
import 'package:flutter/services.dart' show rootBundle;
import 'package:intl/intl.dart';
import 'package:web_socket_channel/web_socket_channel.dart';
  9. typedef ResponseCallback = void Function(
  10. Xunfei_AudioTranslation_Result_Text text,
  11. Xunfei_AudioTranslation_Result_Audio audio); // 定义回调函数类型
  12. class Xunfei_AudioTranslation {
  13. static const int STATUS_FIRST_FRAME = 0;
  14. static const int STATUS_CONTINUE_FRAME = 1;
  15. static const int STATUS_LAST_FRAME = 2;
  16. // 静态变量保存唯一实例
  17. static Xunfei_AudioTranslation? _instance;
  18. final String appId;
  19. final String apiKey;
  20. final String apiSecret;
  21. final String host = "ws-api.xf-yun.com";
  22. final String httpProto = "HTTP/1.1";
  23. final String httpMethod = "GET";
  24. final String requestUri = "/v1/private/simult_interpretation";
  25. final String algorithm = "hmac-sha256";
  26. final int state = 0; //0未初始化 1已连接 2翻译中
  27. final String msg = "";
  28. WebSocketChannel? _channel;
  29. final ResponseCallback onResponse; // 回调函数类型
  30. late Xunfei_AudioTranslation_Result_Text currtest; //翻译结果对象 文本
  31. late Xunfei_AudioTranslation_Result_Audio curraudio; //翻译结果对象 音频
  32. Xunfei_AudioTranslation._internal({
  33. required this.appId,
  34. required this.apiKey,
  35. required this.apiSecret,
  36. required this.onResponse, // 在构造函数中传递回调
  37. });
  38. // 工厂构造函数
  39. factory Xunfei_AudioTranslation({
  40. required String appId,
  41. required String apiKey,
  42. required String apiSecret,
  43. required ResponseCallback onResponse,
  44. }) {
  45. _instance ??= Xunfei_AudioTranslation._internal(
  46. appId: appId,
  47. apiKey: apiKey,
  48. apiSecret: apiSecret,
  49. onResponse: onResponse,
  50. );
  51. return _instance!;
  52. }
  53. // 创建 WebSocket URL
  54. String _createUrl() {
  55. final now = DateTime.now();
  56. final date =
  57. DateFormat("EEE, dd MMM yyyy HH:mm:ss 'GMT'").format(now.toUtc());
  58. final signatureOrigin =
  59. "host: $host\ndate: $date\nGET $requestUri HTTP/1.1";
  60. // 使用 HmacUtil 计算 HMAC-SHA256 签名
  61. final signature = _hmacSha256(apiSecret, signatureOrigin);
  62. final authorization = base64.encode(utf8.encode(
  63. "api_key=\"$apiKey\", algorithm=\"hmac-sha256\", headers=\"host date request-line\", signature=\"$signature\""));
  64. final queryParams = {
  65. "authorization": authorization,
  66. "date": date,
  67. "host": host,
  68. "serviceId": "simult_interpretation"
  69. };
  70. final wsUri =
  71. 'ws://$host$requestUri?${Uri(queryParameters: queryParams).query}';
  72. return wsUri;
  73. }
  74. // 创建参数
  75. Map<String, dynamic> _createParams(
  76. String appId, int status, List<int> audio) {
  77. final param = {
  78. "header": {
  79. "app_id": appId,
  80. "status": status,
  81. },
  82. "parameter": {
  83. "ist": {
  84. "accent": "mandarin",
  85. "domain": "ist_ed_open",
  86. "language": "zh_cn",
  87. "vto": 15000,
  88. "eos": 150000
  89. },
  90. "streamtrans": {"from": "cn", "to": "en"},
  91. "tts": {
  92. "vcn": "x2_catherine",
  93. "tts_results": {
  94. "encoding": "raw",
  95. "sample_rate": 16000,
  96. "channels": 1,
  97. "bit_depth": 16,
  98. "frame_size": 0
  99. }
  100. }
  101. },
  102. "payload": {
  103. "data": {
  104. "audio": base64.encode(audio),
  105. "encoding": "raw",
  106. "sample_rate": 16000,
  107. "seq": 1,
  108. "status": status
  109. }
  110. }
  111. };
  112. return param;
  113. }
  114. // 使用SHA-256算法计算HMAC
  115. String _hmacSha256(String key, String message) {
  116. var keyBytes = utf8.encode(key); // 将密钥转为字节数组
  117. var messageBytes = utf8.encode(message); // 将消息转为字节数组
  118. var hmac = Hmac(sha256, keyBytes); // 创建 HMAC 对象,指定哈希算法和密钥
  119. var digest = hmac.convert(messageBytes); // 计算消息的哈希
  120. return base64.encode(digest.bytes); // 返回 base64 编码的哈希值
  121. }
  122. //测试sdk
  123. Future<void> start() async {
  124. String wsUrl = _createUrl();
  125. await _connect(wsUrl);
  126. await Future.delayed(const Duration(seconds: 3));
  127. return;
  128. }
  129. // 创建WebSocket连接
  130. Future<void> _connect(String url) async {
  131. _channel = WebSocketChannel.connect(Uri.parse(url));
  132. _channel?.stream.listen(
  133. (message) {
  134. onMessage(message);
  135. },
  136. onError: (error) {
  137. print('连接失败: $error');
  138. },
  139. onDone: () {
  140. print('WebSocket 连接已关闭');
  141. },
  142. cancelOnError: true,
  143. );
  144. Future.delayed(const Duration(seconds: 1));
  145. }
  146. // 上传音频
  147. Future<void> pushaudio(Stream<List<int>> audioStream) async {
  148. int frameSize = 1280; // 每一帧的音频大小
  149. double interval = 0.04; // 发送音频间隔(单位:s)
  150. int status = STATUS_FIRST_FRAME; // 音频的状态信息,标识音频是第一帧,还是中间帧、最后一帧
  151. currtest = Xunfei_AudioTranslation_Result_Text();
  152. int index = 0;
  153. List<int> buffer = [];
  154. try {
  155. await for (List<int> frame in audioStream) {
  156. // 将音频数据添加到 buffer
  157. buffer.addAll(frame);
  158. while (buffer.length >= frameSize) {
  159. List<int> sendFrame = buffer.sublist(0, frameSize);
  160. buffer = buffer.sublist(frameSize);
  161. // 判断是否读取到足够的帧
  162. if (index + frameSize <= buffer.length) {
  163. frame = buffer.sublist(index, index + frameSize);
  164. index += frameSize;
  165. } else {
  166. frame = buffer.sublist(index);
  167. index = buffer.length; // 结束
  168. }
  169. // 第一帧处理
  170. if (status == STATUS_FIRST_FRAME) {
  171. String data = json.encode(_createParams(appId, status, sendFrame));
  172. _channel?.sink.add(data);
  173. // print('第一帧已发送...' + data);
  174. status = STATUS_CONTINUE_FRAME;
  175. }
  176. // 中间帧处理
  177. else if (status == STATUS_CONTINUE_FRAME) {
  178. String data = json.encode(_createParams(appId, status, sendFrame));
  179. _channel?.sink.add(data);
  180. // print('中间帧已发送...');
  181. }
  182. // 最后一帧处理
  183. else if (status == STATUS_LAST_FRAME) {
  184. // print('最后一帧已发送...');
  185. String data = json.encode(_createParams(appId, status, sendFrame));
  186. _channel?.sink.add(data);
  187. break;
  188. }
  189. // 模拟音频采样间隔
  190. await Future.delayed(
  191. Duration(milliseconds: (interval * 1000).toInt()));
  192. }
  193. }
  194. status = STATUS_LAST_FRAME;
  195. String data = json.encode(_createParams(appId, status, []));
  196. _channel?.sink.add(data);
  197. } catch (e) {
  198. print("push msg: $e");
  199. }
  200. print('音频处理完成');
  201. }
  202. // 处理接收到的消息
  203. Future<void> onMessage(String message) async {
  204. try {
  205. print("收到的消息:$message");
  206. } catch (e) {
  207. print("receive msg, but parse exception: $e");
  208. }
  209. // 对结果进行解析
  210. var messageMap = json.decode(message);
  211. var status = messageMap["header"]["status"];
  212. var sid = messageMap["header"]["sid"];
  213. // 接收到的识别结果写到文本
  214. if (messageMap.containsKey('payload') &&
  215. messageMap['payload'].containsKey('recognition_results')) {
  216. var result = messageMap['payload']['recognition_results']['text'];
  217. var asrresult = utf8.decode(base64.decode(result));
  218. currtest.add(asrresult); //加入到结果对象中
  219. }
  220. // 接收到的翻译结果写到文本
  221. // if (messageMap['payload'].containsKey('streamtrans_results')) {
  222. // var result = messageMap['payload']['streamtrans_results']['text'];
  223. // var transresult = utf8.decode(base64.decode(result));
  224. // }
  225. // 把接收到的音频流合成PCM
  226. if (messageMap.containsKey('payload') &&
  227. messageMap['payload'].containsKey('tts_results')) {
  228. var audio = messageMap['payload']['tts_results']['audio'];
  229. var audioData = base64.decode(audio);
  230. curraudio.addAudioData(audioData);
  231. // var file = File('output/audio/trans.pcm');
  232. // await file.writeAsBytes(audioData, mode: FileMode.append);
  233. }
  234. onResponse(currtest, curraudio);
  235. if (status == 2) {
  236. print("数据处理完毕,等待实时转译结束!同传后的音频文件请到output/audio/目录查看...");
  237. await Future.delayed(Duration(seconds: 3));
  238. close();
  239. }
  240. return;
  241. }
  242. // 关闭WebSocket连接
  243. void close() {
  244. _channel?.sink.close();
  245. }
  246. }