|
|
@@ -4,8 +4,8 @@ import 'dart:typed_data'; |
|
|
|
import 'package:demo001/xunfei/xunfei.dart'; |
|
|
|
import 'package:flutter/material.dart'; |
|
|
|
import 'package:path_provider/path_provider.dart'; |
|
|
|
import 'package:flutter_sound/flutter_sound.dart'; |
|
|
|
import 'package:just_audio/just_audio.dart'; |
|
|
|
import 'package:just_audio/just_audio.dart' as just_audio; |
|
|
|
import 'package:flutter_sound/flutter_sound.dart' as flutter_sound; |
|
|
|
import 'package:permission_handler/permission_handler.dart'; |
|
|
|
|
|
|
|
class SoundRecordScene extends StatefulWidget { |
|
|
@@ -15,8 +15,8 @@ class SoundRecordScene extends StatefulWidget { |
|
|
|
|
|
|
|
class _SoundRecordSceneState extends State<SoundRecordScene> { |
|
|
|
late ISDK _sdk; |
|
|
|
FlutterSoundRecorder? _soundRecorder; |
|
|
|
AudioPlayer? _audioPlayer; |
|
|
|
flutter_sound.FlutterSoundRecorder? _soundRecorder; |
|
|
|
just_audio.AudioPlayer? _audioPlayer; |
|
|
|
bool _isRecorderReady = false; |
|
|
|
bool _isRecording = false; |
|
|
|
bool _isSpeaking = false; //是否说话 |
|
|
@@ -34,7 +34,7 @@ class _SoundRecordSceneState extends State<SoundRecordScene> { |
|
|
|
final Duration _silenceDuration = Duration(seconds: 1); // 持续低于阈值的时间 |
|
|
|
|
|
|
|
// 采样率和声道数 |
|
|
|
Codec _audiocodec = Codec.pcm16; |
|
|
|
flutter_sound.Codec _audiocodec = flutter_sound.Codec.pcm16; |
|
|
|
final int _sampleRate = 16000; // 16kHz 采样率 |
|
|
|
final int _numChannels = 1; // 单声道 |
|
|
|
StreamController<Uint8List> _audioDataStreamController = |
|
|
@@ -46,7 +46,7 @@ class _SoundRecordSceneState extends State<SoundRecordScene> { |
|
|
|
appId: "137dc132", |
|
|
|
apiKey: "1c1891a475e71250ecd1320303ad6545", |
|
|
|
apiSecret: "MjZhNDA1NTI1NWZkZDQxOTMxYzMyN2Yw"); |
|
|
|
_audioPlayer = AudioPlayer(); |
|
|
|
_audioPlayer = just_audio.AudioPlayer(); |
|
|
|
_requestPermissions(); |
|
|
|
_initRecorder(); |
|
|
|
} |
|
|
@@ -54,14 +54,15 @@ class _SoundRecordSceneState extends State<SoundRecordScene> { |
|
|
|
// 初始化录音器 |
|
|
|
void _initRecorder() async { |
|
|
|
try { |
|
|
|
_soundRecorder = FlutterSoundRecorder(); |
|
|
|
_soundRecorder = flutter_sound.FlutterSoundRecorder(); |
|
|
|
await _soundRecorder?.openRecorder(); |
|
|
|
await _soundRecorder |
|
|
|
?.setSubscriptionDuration(const Duration(milliseconds: 100)); |
|
|
|
//检查编解码器是否支持 |
|
|
|
if (!await _soundRecorder!.isEncoderSupported(Codec.pcm16)) { |
|
|
|
if (!await _soundRecorder! |
|
|
|
.isEncoderSupported(flutter_sound.Codec.pcm16)) { |
|
|
|
_log("PCM16 codec is not supported on this device."); |
|
|
|
_audiocodec = Codec.aacADTS; |
|
|
|
_audiocodec = flutter_sound.Codec.aacADTS; |
|
|
|
} |
|
|
|
setState(() { |
|
|
|
_isRecorderReady = true; |
|
|
@@ -112,7 +113,8 @@ class _SoundRecordSceneState extends State<SoundRecordScene> { |
|
|
|
numChannels: _numChannels, // 设置声道数 |
|
|
|
enableVoiceProcessing: true, // 启用音量监听 |
|
|
|
); |
|
|
|
_soundRecorder?.onProgress!.listen((RecordingDisposition event) { |
|
|
|
_soundRecorder?.onProgress! |
|
|
|
.listen((flutter_sound.RecordingDisposition event) { |
|
|
|
// _log('onProgress 回调触发, 分贝: ${event.decibels}'); |
|
|
|
if (event.decibels != null) { |
|
|
|
setState(() { |
|
|
@@ -226,7 +228,9 @@ class _SoundRecordSceneState extends State<SoundRecordScene> { |
|
|
|
|
|
|
|
// Task state-change callback: reacts to a translation task's lifecycle.
//
// NOTE(review): state codes come from the opaque ITaskTrans contract and are
// not defined in this file — presumably 2 is an intermediate state and 3 is
// "translation finished"; confirm against the xunfei SDK.
void _taskchange(ITaskTrans task) {
  // State 2: intentionally a no-op placeholder (empty branch kept as-is).
  if (task.state() == 2) {}
  // State 3: play back the task's translated audio stream.
  if (task.state() == 3) {
    playAudioStream(task.translateAudio());
  }
}
|
|
|
|
|
|
|
// 添加日志信息并自动滚动 |
|
|
@@ -254,13 +258,18 @@ class _SoundRecordSceneState extends State<SoundRecordScene> { |
|
|
|
// 播放音频流 |
|
|
|
// 播放音频流 — plays an in-memory audio byte stream through the player.
//
// Wraps [audioStream] in a [CustomStreamAudioSource] so the player can pull
// bytes on demand. The stream is handed to the source untouched; if it is a
// single-subscription stream it must not have been listened to beforehand.
//
// Fix: removed the leftover pre-refactor loop (`await for` + setUrl + play)
// that consumed the stream itself — exhausting it before the audio source
// could read it — and the duplicate error print in the catch clause.
void playAudioStream(Stream<Uint8List> audioStream) async {
  try {
    // Build a pull-based audio source over the raw stream.
    final audioSource = CustomStreamAudioSource(
      audioStream: audioStream,
      contentLength: null, // total length unknown for a live stream
    );

    // Attach the source to the player and start playback.
    await _audioPlayer?.setAudioSource(audioSource);
    await _audioPlayer?.play();

    print('音频流播放开始');
  } catch (e) {
    print('播放音频流失败: $e');
  }
}
|
|
|
|
|
|
@@ -399,3 +408,41 @@ class _SoundRecordSceneState extends State<SoundRecordScene> { |
|
|
|
); |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
/// A [just_audio.StreamAudioSource] that serves audio bytes from an
/// in-memory [Stream] instead of a file or URL.
///
/// Range requests are not supported: the underlying stream can only be read
/// sequentially, so any request with a non-null start/end throws
/// [UnsupportedError].
class CustomStreamAudioSource extends just_audio.StreamAudioSource {
  /// The raw audio bytes to serve. If this is a single-subscription stream it
  /// must not have been listened to before [request] is called.
  final Stream<Uint8List> audioStream;

  /// Total byte length of the audio, or null when unknown (e.g. live stream).
  final int? contentLength;

  CustomStreamAudioSource({
    required this.audioStream,
    this.contentLength,
  });

  @override
  Future<just_audio.StreamAudioResponse> request([int? start, int? end]) async {
    try {
      // A plain stream cannot seek, so reject any ranged request outright.
      if (start != null || end != null) {
        throw UnsupportedError('Range requests are not supported');
      }

      // Fix: Uint8List already implements List<int>, so the stream can be
      // passed straight through — the previous `.map((u) => u.toList())`
      // copied every chunk for nothing.
      return just_audio.StreamAudioResponse(
        stream: audioStream,
        contentLength: contentLength,
        sourceLength: null,
        offset: null,
        // NOTE(review): 'audio/mpeg' may not match the actual payload — the
        // recorder in this file is configured for PCM16; confirm the type.
        contentType: 'audio/mpeg',
      );
    } catch (e) {
      print('请求音频流失败: $e');
      rethrow;
    }
  }
}