import 'dart:convert';
import 'package:crypto/crypto.dart';
import 'package:demo001/plugin/xunfei/audiotranslate/result_audio.dart';
import 'package:demo001/plugin/xunfei/audiotranslate/result_test.dart';
import 'package:web_socket_channel/web_socket_channel.dart';
import 'package:intl/intl.dart';

typedef ResponseCallback = void Function(
    Xunfei_AudioTranslation_Result_Text text,
    Xunfei_AudioTranslation_Result_Audio
        audio); // callback for incremental text/audio results

class Xunfei_AudioTranslation {
  static const int STATUS_FIRST_FRAME = 0;
  static const int STATUS_CONTINUE_FRAME = 1;
  static const int STATUS_LAST_FRAME = 2;

  // Singleton instance.
  static Xunfei_AudioTranslation? _instance;

  final String appId;
  final String apiKey;
  final String apiSecret;
  final String host = "ws-api.xf-yun.com";
  final String httpProto = "HTTP/1.1";
  final String httpMethod = "GET";
  final String requestUri = "/v1/private/simult_interpretation";
  final String algorithm = "hmac-sha256";
  int state = 0; // 0 = uninitialized, 1 = connected, 2 = translating
  String msg = "";
  WebSocketChannel? _channel;
  final ResponseCallback onResponse; // result callback

  // Accumulated results: recognized text and synthesized audio. Initialized
  // eagerly (assuming both result classes have default constructors, as the
  // text one demonstrably does below) so onMessage never reads an
  // uninitialized field.
  Xunfei_AudioTranslation_Result_Text currText =
      Xunfei_AudioTranslation_Result_Text();
  Xunfei_AudioTranslation_Result_Audio currAudio =
      Xunfei_AudioTranslation_Result_Audio();

  Xunfei_AudioTranslation._internal({
    required this.appId,
    required this.apiKey,
    required this.apiSecret,
    required this.onResponse, // callback supplied at construction time
  });

  // Factory constructor: always returns the shared singleton.
  factory Xunfei_AudioTranslation({
    required String appId,
    required String apiKey,
    required String apiSecret,
    required ResponseCallback onResponse,
  }) {
    _instance ??= Xunfei_AudioTranslation._internal(
      appId: appId,
      apiKey: apiKey,
      apiSecret: apiSecret,
      onResponse: onResponse,
    );
    return _instance!;
  }

  // Build the signed WebSocket URL.
  String _createUrl() {
    final now = DateTime.now();
    // RFC 1123 date; pin the locale to en_US so weekday/month names stay English.
    final date = DateFormat("EEE, dd MMM yyyy HH:mm:ss 'GMT'", 'en_US')
        .format(now.toUtc());
    final signatureOrigin =
        "host: $host\ndate: $date\n$httpMethod $requestUri $httpProto";

    // HMAC-SHA256 signature over host, date and request-line.
    final signature = _hmacSha256(apiSecret, signatureOrigin);

    final authorization = base64.encode(utf8.encode(
        "api_key=\"$apiKey\", algorithm=\"$algorithm\", headers=\"host date request-line\", signature=\"$signature\""));

    final queryParams = {
      "authorization": authorization,
      "date": date,
      "host": host,
      "serviceId": "simult_interpretation"
    };

    // The service endpoint is TLS-only, so the scheme must be wss://, not ws://.
    final wsUri =
        'wss://$host$requestUri?${Uri(queryParameters: queryParams).query}';
    return wsUri;
  }

  // Build the request payload for one audio frame.
  Map<String, dynamic> _createParams(
      String appId, int status, List<int> audio) {
    final param = {
      "header": {
        "app_id": appId,
        "status": status,
      },
      "parameter": {
        "ist": {
          "accent": "mandarin",
          "domain": "ist_ed_open",
          "language": "zh_cn",
          "vto": 15000,
          "eos": 150000
        },
        "streamtrans": {"from": "cn", "to": "en"},
        "tts": {
          "vcn": "x2_catherine",
          "tts_results": {
            "encoding": "raw",
            "sample_rate": 16000,
            "channels": 1,
            "bit_depth": 16,
            "frame_size": 0
          }
        }
      },
      "payload": {
        "data": {
          "audio": base64.encode(audio),
          "encoding": "raw",
          "sample_rate": 16000,
          "seq": 1,
          "status": status
        }
      }
    };

    return param;
  }

  // Compute an HMAC-SHA256 digest and return it base64-encoded.
  String _hmacSha256(String key, String message) {
    var keyBytes = utf8.encode(key); // key as bytes
    var messageBytes = utf8.encode(message); // message as bytes
    var hmac = Hmac(sha256, keyBytes); // HMAC with SHA-256 over the key
    var digest = hmac.convert(messageBytes); // digest of the message
    return base64.encode(digest.bytes); // base64-encode the raw digest
  }

  // Open the connection and give the handshake time to settle.
  Future<void> start() async {
    String wsUrl = _createUrl();
    await _connect(wsUrl);
    await Future.delayed(const Duration(seconds: 3));
    state = 1; // mark as connected
    return;
  }

  // Create the WebSocket connection and wire up the handlers.
  Future<void> _connect(String url) async {
    _channel = WebSocketChannel.connect(Uri.parse(url));
    _channel?.stream.listen(
      (message) {
        onMessage(message);
      },
      onError: (error) {
        print('Connection failed: $error');
      },
      onDone: () {
        print('WebSocket connection closed');
      },
      cancelOnError: true,
    );
    // The original dropped this future on the floor; await it so callers
    // actually wait out the grace period.
    await Future.delayed(const Duration(seconds: 1));
  }

  // Upload audio to the service, one frameSize-byte frame at a time.
  Future<void> pushaudio(Stream<List<int>> audioStream) async {
    int frameSize = 1280; // bytes per frame (40 ms of 16 kHz 16-bit mono PCM)
    double interval = 0.04; // send interval in seconds
    int status = STATUS_FIRST_FRAME; // first, continuation or last frame
    currText = Xunfei_AudioTranslation_Result_Text(); // reset text result
    currAudio = Xunfei_AudioTranslation_Result_Audio(); // reset audio result
    List<int> buffer = [];
    try {
      await for (List<int> chunk in audioStream) {
        // Accumulate incoming data, then emit complete frames from the front.
        // (The original also re-sliced `buffer` through a separate `index`
        // cursor, double-consuming the data; that dead path is removed.)
        buffer.addAll(chunk);
        while (buffer.length >= frameSize) {
          List<int> sendFrame = buffer.sublist(0, frameSize);
          buffer = buffer.sublist(frameSize);

          String data = json.encode(_createParams(appId, status, sendFrame));
          _channel?.sink.add(data);
          status = STATUS_CONTINUE_FRAME;

          // Pace the uploads to mimic real-time capture.
          await Future.delayed(
              Duration(milliseconds: (interval * 1000).toInt()));
        }
      }
      // Flush any trailing partial frame.
      if (buffer.isNotEmpty) {
        String data = json.encode(_createParams(appId, status, buffer));
        _channel?.sink.add(data);
        status = STATUS_CONTINUE_FRAME;
      }
      // Signal end-of-stream with an empty last frame.
      status = STATUS_LAST_FRAME;
      String data = json.encode(_createParams(appId, status, []));
      _channel?.sink.add(data);
    } catch (e) {
      print("push msg: $e");
    }

    print('Audio upload finished');
  }

  // Handle a message received from the service.
  Future<void> onMessage(String message) async {
    Map<String, dynamic> messageMap;
    try {
      print("Received message: $message");
      // Parse inside the try block: json.decode is what can actually throw.
      messageMap = json.decode(message);
    } catch (e) {
      print("receive msg, but parse exception: $e");
      return;
    }

    var status = messageMap["header"]["status"];

    // Append recognized text to the text result.
    if (messageMap.containsKey('payload') &&
        messageMap['payload'].containsKey('recognition_results')) {
      var result = messageMap['payload']['recognition_results']['text'];
      var asrresult = utf8.decode(base64.decode(result));
      currText.add(asrresult); // add to the accumulated result
    }

    // Translated text arrives under 'streamtrans_results'; not handled yet.
    // if (messageMap['payload'].containsKey('streamtrans_results')) {
    //   var result = messageMap['payload']['streamtrans_results']['text'];
    //   var transresult = utf8.decode(base64.decode(result));
    // }

    // Collect the synthesized audio stream as PCM.
    if (messageMap.containsKey('payload') &&
        messageMap['payload'].containsKey('tts_results')) {
      var audio = messageMap['payload']['tts_results']['audio'];
      var audioData = base64.decode(audio);
      currAudio.addAudioData(audioData);
    }
    onResponse(currText, currAudio);
    if (status == 2) {
      // Status 2 is the server's final message for this session.
      print("All data processed; waiting for the session to wind down.");
      await Future.delayed(Duration(seconds: 3));
      close();
    }
    return;
  }

  // Close the WebSocket connection.
  void close() {
    state = 0; // back to uninitialized
    _channel?.sink.close();
  }
}
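
// --- Usage sketch (illustrative; not part of the original plugin) ---
// A minimal example of driving the class end to end, assuming a
// 16 kHz / 16-bit / mono PCM source. `pcmStream` is a hypothetical input the
// caller must supply, the credential strings are placeholders, and printing
// `text` assumes the result class has a readable toString().
Future<void> demo(Stream<List<int>> pcmStream) async {
  final translator = Xunfei_AudioTranslation(
    appId: 'your_app_id', // placeholder credentials
    apiKey: 'your_api_key',
    apiSecret: 'your_api_secret',
    onResponse: (text, audio) {
      // Invoked on every server message with the accumulated results.
      print('partial text: $text');
    },
  );
  await translator.start(); // sign the URL and open the WebSocket
  await translator.pushaudio(pcmStream); // frames are paced at 40 ms each
  // onMessage closes the channel itself once the server reports status == 2.
}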