Browse source

Upload code

master
awei 5 months ago
parent
commit
1c9249d268
4 changed files with 470 additions and 33 deletions
  1. lib/scenes/SoundRecordScene.dart  +64 -33
  2. lib/xunfei/task_trans.dart  +341 -0
  3. lib/xunfei/utils.dart  +14 -0
  4. lib/xunfei/xunfei.dart  +51 -0

+64 -33
lib/scenes/SoundRecordScene.dart  View file

@@ -1,3 +1,7 @@
import 'dart:async';
import 'dart:typed_data';

import 'package:demo001/xunfei/xunfei.dart';
import 'package:flutter/material.dart';
import 'package:path_provider/path_provider.dart';
import 'package:flutter_sound/flutter_sound.dart';
@@ -10,40 +14,39 @@ class SoundRecordScene extends StatefulWidget {
}

class _SoundRecordSceneState extends State<SoundRecordScene> {
late ISDK _sdk;
FlutterSoundRecorder? _soundRecorder;
AudioPlayer? _audioPlayer;
bool _isRecorderReady = false;
bool _isRecording = false;
bool _isSpeaking = false; // whether currently speaking
int _stateSpeak = 0; // speaking state: 0 not speaking, 1 started speaking, 2 speaking, 3 finished speaking
String? _audioFilePath;
double _volumeLevel = 0.0; // current volume level
DateTime? _lastBelowThresholdTime; // when the volume last dropped below the threshold
ScrollController _scrollController = ScrollController();
List<String> _logs = [];

List<ITaskTrans> _trans = [];
late ITaskTrans _lasttran;
// volume thresholds
- final double _speakingThreshold = -30.0; // threshold to start speaking
- final double _silenceThreshold = -40.0; // threshold to stop speaking
+ final double _speakingThreshold = 50.0; // threshold to start speaking
+ final double _silenceThreshold = 30.0; // threshold to stop speaking
final Duration _silenceDuration = Duration(seconds: 1); // how long the volume must stay below the threshold
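// Simple threshold-based voice activity detection: speech is considered to
// start once the reported decibel level rises above _speakingThreshold, and
// to end after it stays below _silenceThreshold for _silenceDuration
// (see _checkSpeakingStatus below).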

// sample rate and channel count
Codec _audiocodec = Codec.pcm16;
final int _sampleRate = 16000; // 16 kHz sample rate
final int _numChannels = 1; // mono

StreamController<Uint8List> _audioDataStreamController = StreamController<Uint8List>.broadcast();
// expose the audio data stream
Stream<Uint8List> get audioDataStream => _audioDataStreamController.stream;
@override
void initState() {
super.initState();
_soundRecorder = FlutterSoundRecorder();
// listen for volume changes
_soundRecorder?.onProgress?.listen((event) {
_log('onProgress callback fired, decibels: ${event.decibels}');
if (event.decibels != null) {
setState(() {
_volumeLevel = event.decibels!; // update the volume level
});
_checkSpeakingStatus(); // check speaking status
}
});
_sdk = Xunfei(
appId: "137dc132",
apiKey: "1c1891a475e71250ecd1320303ad6545",
apiSecret: "MjZhNDA1NTI1NWZkZDQxOTMxYzMyN2Yw");
_audioPlayer = AudioPlayer();
_requestPermissions();
_initRecorder();
@@ -52,7 +55,14 @@ class _SoundRecordSceneState extends State<SoundRecordScene> {
// initialize the recorder
void _initRecorder() async {
try {
_soundRecorder = FlutterSoundRecorder();
await _soundRecorder?.openRecorder();
await _soundRecorder?.setSubscriptionDuration(const Duration(milliseconds: 100));
// check whether the codec is supported
if (!await _soundRecorder!.isEncoderSupported(Codec.pcm16)) {
_log("PCM16 codec is not supported on this device.");
_audiocodec = Codec.aacADTS;
}
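// Note: the downstream translation task assumes raw 16 kHz PCM16 frames
// ("encoding": "raw" in the Xunfei request payload), so the AAC fallback is
// only a last resort for devices without PCM16 support.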
setState(() {
_isRecorderReady = true;
});
@@ -91,20 +101,34 @@ class _SoundRecordSceneState extends State<SoundRecordScene> {
_log('Recorder is not ready');
return;
}

if (_isRecording) return; // guard against repeated calls

final directory = await getTemporaryDirectory();
- final tempPath = '${directory.path}/recorded_audio.aac';
+ final tempPath = '${directory.path}/recorded_audio.pcm';
_log('Recording file path: $tempPath');

await _soundRecorder?.startRecorder(
toFile: tempPath,
- codec: Codec.aacADTS,
+ codec: _audiocodec,
toStream: _audioDataStreamController.sink, // write the audio data into the StreamController
sampleRate: _sampleRate, // set the sample rate
numChannels: _numChannels, // set the channel count
- enableVoiceProcessing: false, // enable volume monitoring
+ enableVoiceProcessing: true, // enable volume monitoring
);
_soundRecorder?.onProgress!.listen((RecordingDisposition event) {
// _log('onProgress callback fired, decibels: ${event.decibels}');
if (event.decibels != null) {
setState(() {
_volumeLevel = event.decibels!; // update the volume level
});
_checkSpeakingStatus(); // check speaking status
}
});
// listen to the audio data stream
_audioDataStreamController.stream.listen((Uint8List audioData) {
if (_isSpeaking){
// _log('Received audio data: ${audioData.length} bytes');
_lasttran.addAudioData(List.from(audioData));
}
// the audio data could be processed further here, e.g. saved to a file or uploaded to a server
});
setState(() {
_audioFilePath = tempPath;
_isRecording = true;
@@ -119,7 +143,9 @@ class _SoundRecordSceneState extends State<SoundRecordScene> {
void _stopRecording() async {
try {
if (!_isRecording) return; // guard against repeated calls
await _soundRecorder?.stopRecorder();
await _soundRecorder?.closeRecorder();
await _lasttran.close();
setState(() {
_isRecording = false;
_volumeLevel = 0.0; // reset the volume level
@@ -132,24 +158,26 @@ class _SoundRecordSceneState extends State<SoundRecordScene> {

// play the recording
void _playRecording() async {
- try {
-   if (_audioFilePath != null) {
-     await _audioPlayer?.play(DeviceFileSource(_audioFilePath!));
-     _log('Playing recording');
-   }
- } catch (e) {
-   _log('Playback error: $e');
- }
+ // try {
+ //   if (_audioFilePath != null) {
+ //     await _audioPlayer?.play(DeviceFileSource(_audioFilePath!));
+ //     _log('Playing recording');
+ //   }
+ // } catch (e) {
+ //   _log('Playback error: $e');
+ // }
}

// check speaking status
- void _checkSpeakingStatus() {
+ _checkSpeakingStatus() {
if (_volumeLevel > _speakingThreshold && !_isSpeaking) {
// volume above the threshold: speaking has started
setState(() {
_isSpeaking = true;
});
_log('Started speaking');
_stateSpeak = 1;
_lasttran = _sdk.createTransTask();
} else if (_volumeLevel < _silenceThreshold) {
// volume below the threshold
if (_lastBelowThresholdTime == null) {
@@ -163,12 +191,15 @@ class _SoundRecordSceneState extends State<SoundRecordScene> {
_isSpeaking = false;
});
_log('Stopped speaking');
_stateSpeak = 3;
_lasttran.close();
}
}
} else {
// volume back above the threshold: reset the timer
_lastBelowThresholdTime = null;
}
_stateSpeak = 2;
}

// append a log entry and auto-scroll


+341 -0
lib/xunfei/task_trans.dart  View file

@@ -0,0 +1,341 @@
// Xunfei (iFlytek) translation task
import 'dart:async';
import 'dart:convert';
import 'dart:ffi';
import 'dart:typed_data';

import 'package:audioplayers/audioplayers.dart';
import 'package:demo001/xunfei/utils.dart';
import 'package:demo001/xunfei/xunfei.dart';
import 'package:intl/intl.dart';
import 'package:web_socket_channel/web_socket_channel.dart';

class XunferTask_Result_Text_Item {
final int sn;
final String pgs;
final List<int> rg;
final List<dynamic> ws;

XunferTask_Result_Text_Item({
required this.sn,
required this.pgs,
required this.rg,
required this.ws,
});
}

class XunferTaskTrans implements ITaskTrans {
static const int STATUS_FIRST_FRAME = 0;
static const int STATUS_CONTINUE_FRAME = 1;
static const int STATUS_LAST_FRAME = 2;

final String appId;
final String apiKey;
final String apiSecret;
final String host = "ws-api.xf-yun.com";
final String requestUri = "/v1/private/simult_interpretation";
late String url;
late WebSocketChannel? _channel;
bool isconnected = false;
// controller for the incoming audio data stream
final StreamController<List<int>> _streamController =
StreamController<List<int>>();
// audio data stream
Stream<List<int>> get stream => _streamController.stream;
// whether the upload task is running
bool _isRunning = false;
// whether the stream has been closed
bool _isStreamClosed = false;
// Future for the upload task
Future<void>? _uploadTask;
final Map<int, XunferTask_Result_Text_Item> tests = {};
final StreamController<Uint8List> _transtreamController =
StreamController<Uint8List>();
bool _isPlaying = false;
XunferTaskTrans({
required this.appId,
required this.apiKey,
required this.apiSecret,
}) {
url = _geturl();
_connect();
_startUploadTask();
}

// build the WebSocket connection URL
String _geturl() {
final now = DateTime.now();
final date =
DateFormat("EEE, dd MMM yyyy HH:mm:ss 'GMT'").format(now.toUtc());
final signatureOrigin =
"host: $host\ndate: $date\nGET $requestUri HTTP/1.1";

// compute the HMAC-SHA256 signature with XunfeiUtils
final signature = XunfeiUtils.hmacSha256(apiSecret, signatureOrigin);

final authorization = base64.encode(utf8.encode(
"api_key=\"$apiKey\", algorithm=\"hmac-sha256\", headers=\"host date request-line\", signature=\"$signature\""));

final queryParams = {
"authorization": authorization,
"date": date,
"host": host,
"serviceId": "simult_interpretation"
};

final wsUri =
'ws://$host$requestUri?${Uri(queryParameters: queryParams).query}';
return wsUri;
}
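// The URL carries Xunfei's WebSocket authentication: the string
// "host: <host>\ndate: <RFC1123 date>\nGET <path> HTTP/1.1" is signed with
// HMAC-SHA256 using apiSecret, the signature is wrapped into an
// authorization string, base64-encoded, and appended as query parameters.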

// build the request parameters
Map<String, dynamic> _createParams(
String appId, int status, List<int> audio) {
final param = {
"header": {
"app_id": appId,
"status": status,
},
"parameter": {
"ist": {
"accent": "mandarin",
"domain": "ist_ed_open",
"language": "zh_cn",
"vto": 15000,
"eos": 150000
},
"streamtrans": {"from": "cn", "to": "en"},
"tts": {
"vcn": "x2_catherine",
"tts_results": {
"encoding": "raw",
"sample_rate": 16000,
"channels": 1,
"bit_depth": 16,
"frame_size": 0
}
}
},
"payload": {
"data": {
"audio": base64.encode(audio),
"encoding": "raw",
"sample_rate": 16000,
"seq": 1,
"status": status
}
}
};

return param;
}

// open the WebSocket connection
Future<void> _connect() async {
_channel = WebSocketChannel.connect(Uri.parse(url));
_channel?.stream.listen(
(message) {
onMessage(message);
},
onError: (error) {
isconnected = false;
print('Connection failed: $error');
},
onDone: () {
isconnected = false;
print('WebSocket connection closed');
},
cancelOnError: true,
);
isconnected = true;
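// Note: WebSocketChannel.connect returns before the handshake completes,
// so isconnected is set optimistically here; failures surface through the
// onError callback above.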
}

// start the upload task
void _startUploadTask() {
_isRunning = true;
_uploadTask = _pushaudio();
}

// upload audio frames to the server
Future<void> _pushaudio() async {
int frameSize = 1280; // bytes per frame (1280 bytes = 40 ms of 16 kHz, 16-bit, mono PCM)
double interval = 0.04; // interval between sends, in seconds
int status = STATUS_FIRST_FRAME; // frame status: first, intermediate, or last frame
int index = 0;
List<int> buffer = [];
try {
await for (List<int> frame in stream) {
// append the incoming audio data to the buffer
buffer.addAll(frame);
while (buffer.length >= frameSize) {
List<int> sendFrame = buffer.sublist(0, frameSize);
buffer = buffer.sublist(frameSize);

// check whether a full frame has been read
if (index + frameSize <= buffer.length) {
frame = buffer.sublist(index, index + frameSize);
index += frameSize;
} else {
frame = buffer.sublist(index);
index = buffer.length; // 结束
}

// first frame
if (status == STATUS_FIRST_FRAME) {
String data = json.encode(_createParams(appId, status, sendFrame));
_channel?.sink.add(data);
print('First frame sent...$data');
status = STATUS_CONTINUE_FRAME;
}
// intermediate frames
else if (status == STATUS_CONTINUE_FRAME) {
String data = json.encode(_createParams(appId, status, sendFrame));
_channel?.sink.add(data);
// print('Intermediate frame sent...');
}
// last frame
else if (status == STATUS_LAST_FRAME) {
print('Last frame sent...');
String data = json.encode(_createParams(appId, status, sendFrame));
_channel?.sink.add(data);
break;
}
// simulate the audio sampling interval
await Future.delayed(
Duration(milliseconds: (interval * 1000).toInt()));
}
}
print('Last frame sent...');
status = STATUS_LAST_FRAME;
String data = json.encode(_createParams(appId, status, []));
_channel?.sink.add(data);
} catch (e) {
print("push msg: $e");
}
print('Audio processing finished');
}

// push audio data into the stream
void addAudioData(List<int> data) {
if (!_isStreamClosed) {
_streamController.add(data);
} else {
print("Stream is closed. Cannot add more data.");
}
}

// handle a translation result message from the server
Future<void> onMessage(String message) async {
// try {
// print("收到的消息:$message");
// } catch (e) {
// print("receive msg, but parse exception: $e");
// }
// parse the result
var messageMap = json.decode(message);
var status = messageMap["header"]["status"];
var sid = messageMap["header"]["sid"];
// write the received recognition result into the text results
if (messageMap.containsKey('payload') &&
messageMap['payload'].containsKey('recognition_results')) {
var result = messageMap['payload']['recognition_results']['text'];
var asrresult = utf8.decode(base64.decode(result));
addtext(asrresult);
print("收到识别回应..${text()}");
}
if (messageMap.containsKey('payload') &&
messageMap['payload'].containsKey('tts_results')) {
var audio = messageMap['payload']['tts_results']['audio'];
var audioData = base64.decode(audio);
_transtreamController.add(audioData);
// curraudio.addAudioData(audioData);
// var file = File('output/audio/trans.pcm');
// await file.writeAsBytes(audioData, mode: FileMode.append);
}
if (status == 2) {
print("数据处理完毕,等待实时转译结束!同传后的音频文件请到output/audio/目录查看...");
await Future.delayed(Duration(seconds: 3));
close();
}
}

// close the stream and stop the upload task
Future<void> close() async {
if (!_isStreamClosed) {
_isStreamClosed = true;
_isRunning = false; // 停止上传任务
await _streamController.close(); // close the stream
await _uploadTask; // wait for the upload task to finish
print("Stream and upload task closed.");
}
}

void addtext(String result) {
print("添加文本结果:$result");
var resultMap = json.decode(result);
int sn = resultMap["sn"] as int;
String pgs = resultMap["pgs"] as String;
List<int> rg = resultMap["rg"] != null
? List<int>.from(resultMap["rg"])
: []; // default to an empty list
List<dynamic> ws = resultMap["ws"] as List<dynamic>;
var item = XunferTask_Result_Text_Item(sn: sn, pgs: pgs, rg: rg, ws: ws);
tests[sn] = item;
}
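// Result assembly: each recognition result carries a sequence number (sn);
// when pgs == "rpl" the new result replaces the earlier results whose sn
// falls in the range rg[0]..rg[1]. text() below replays the results in sn
// order, applying those replacements, and concatenates the words (ws/cw/w).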

String text() {
if (tests.isNotEmpty) {
String resultStr = "";
Map<int, XunferTask_Result_Text_Item> _results = {};
var sortedKeys = tests.keys.toList()..sort();
for (var key in sortedKeys) {
var item = tests[key];
if (item != null) {
if (item.pgs == "rpl") {
var start = item.rg[0];
var end = item.rg[1];
for (int i = start; i <= end; i++) {
_results.remove(i);
}
}
_results[item.sn] = item;
}
}
var keys = _results.keys.toList()..sort();
for (var key in keys) {
var item = tests[key];
if (item != null) {
for (var ws in item.ws) {
var it = ws as Map<String, dynamic>;
var cw = it["cw"] as List<dynamic>;
for (var ct in cw) {
resultStr += ct["w"] as String;
}
}
}
}
return resultStr;
}
return "";
}

// play the translated TTS audio as it arrives
Future<void> audio(AudioPlayer _audioPlayer) async {
_transtreamController.stream.listen((Uint8List audioBytes) async {
if (!_isPlaying) {
// first playback
await _audioPlayer.play(BytesSource(audioBytes));
_isPlaying = true;
} else {
// audioplayers has no API for appending to an in-progress source,
// so each later chunk is simply played as a new BytesSource
await _audioPlayer.play(BytesSource(audioBytes));
}
}, onError: (error) {
print("Error in audio stream: $error");
});
}
}

+14 -0
lib/xunfei/utils.dart  View file

@@ -0,0 +1,14 @@
import 'dart:convert';

import 'package:crypto/crypto.dart';

class XunfeiUtils {
// compute an HMAC using the SHA-256 algorithm
static String hmacSha256(String key, String message) {
var keyBytes = utf8.encode(key); // convert the key to bytes
var messageBytes = utf8.encode(message); // convert the message to bytes
var hmac = Hmac(sha256, keyBytes); // create the HMAC with the hash algorithm and key
var digest = hmac.convert(messageBytes); // hash the message
return base64.encode(digest.bytes); // return the base64-encoded digest
}
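// Example (hypothetical values):
//   final sig = XunfeiUtils.hmacSha256('myApiSecret',
//       'host: ws-api.xf-yun.com\ndate: <date>\nGET /v1/private/simult_interpretation HTTP/1.1');
//   // sig is the base64-encoded signature used in the authorization string.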
}

+51 -0
lib/xunfei/xunfei.dart  View file

@@ -0,0 +1,51 @@
import 'dart:ffi';

import 'package:demo001/xunfei/task_trans.dart';
import 'package:intl/intl.dart';

abstract class ISDK {
// create a translation task
ITaskTrans createTransTask();
}

abstract class ITaskTrans {
// add audio data
void addAudioData(List<int> data);
Future<void> close();
}

class Xunfei implements ISDK {
final String appId;
final String apiKey;
final String apiSecret;

// static field holding the single instance
static Xunfei? _instance;
Xunfei._internal({
required this.appId,
required this.apiKey,
required this.apiSecret,
});

// factory constructor
factory Xunfei({
required String appId,
required String apiKey,
required String apiSecret,
}) {
_instance ??= Xunfei._internal(
appId: appId,
apiKey: apiKey,
apiSecret: apiSecret,
);
return _instance!;
}


@override
ITaskTrans createTransTask() {
var task = XunferTaskTrans(appId: appId, apiKey: apiKey, apiSecret: apiSecret);
return task;
}


}
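// Typical usage (a sketch, mirroring SoundRecordScene):
//   final ISDK sdk = Xunfei(appId: '...', apiKey: '...', apiSecret: '...');
//   final task = sdk.createTransTask();  // one task per utterance
//   task.addAudioData(pcmFrame);         // feed raw 16 kHz 16-bit mono PCM
//   await task.close();                  // signal the end of the utterance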
