I'm hoping someone can help me. I'm using NAudio, capturing from multiple devices with WasapiLoopbackCapture and mixing the captures together. None of it can be written to disk, so I do everything in memory and eventually load the data into a ConcurrentDictionary for use by another process I'll build later. It all works, except that whenever my thread fires to load the chunk data into the dictionary, I hear a distinct clipping sound on playback, and it matches the interval at which the thread fires. If I set it to fire every 20 seconds, I hear the clip at the 20-second mark when I listen back. I need to get rid of this clipping, and I can't figure out what's causing it.
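For what it's worth, my own back-of-the-envelope numbers (not anything measured in the code): the captured audio is converted down to 8 kHz, 16-bit mono before it is buffered, so each 20-second chunk the thread pulls should be roughly 8000 samples/s × 2 bytes × 20 s ≈ 320,000 bytes, and the clip lines up with those chunk boundaries in the resulting audio.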
Here are the basic C# steps:
private static WasapiLoopbackCapture Wavein = null;
private static WasapiLoopbackCapture Wavein2 = null;
private static WaveFormat pcm8k16bitSt = new WaveFormat(8000, 16, 2);
private static WaveFormat pcm8k16bitMo = new WaveFormat(8000, 16, 1);
private static WaveFormat Bit16;
private static byte[] DataHeader = System.Text.Encoding.UTF8.GetBytes("data");
private static List<byte> SBL = new List<byte>();
private static List<byte> SBL2 = new List<byte>();
private static byte[] chunkdata;
private static byte[] chunkdata2;
internal static ConcurrentDictionary<DateTime, DataFrame> AllSpeakerBytes = new ConcurrentDictionary<DateTime, DataFrame>();
int SdeviceNumber = 0;
int SdeviceNumber2 = 0;
private static MMDevice deviceToRecord = null;
private static MMDevice deviceToRecord2 = null;
deviceToRecord = (new MMDeviceEnumerator().EnumerateAudioEndPoints(DataFlow.All, DeviceState.Active))[SdeviceNumber];
deviceToRecord2 = (new MMDeviceEnumerator().EnumerateAudioEndPoints(DataFlow.All, DeviceState.Active))[SdeviceNumber2];
RecordAllSpeakers();
private void RecordAllSpeakers()
{
    if (deviceToRecord != null)
    {
        Wavein = new WasapiLoopbackCapture(deviceToRecord);
        // Keep a silent stream playing on the endpoint so the loopback capture keeps delivering data
        var silence = new SilenceProvider(Wavein.WaveFormat).ToSampleProvider();
        var wo = new WaveOutEvent();
        wo.DeviceNumber = SdeviceNumber;
        wo.Init(silence);
        wo.Play();
        Wavein.DataAvailable += new EventHandler<NAudio.Wave.WaveInEventArgs>(SsourceStream_DataAvailable);
        Wavein.StartRecording();
        SRecFlag = true;
    }
    if (deviceToRecord2 != null)
    {
        Wavein2 = new WasapiLoopbackCapture(deviceToRecord2);
        var silence = new SilenceProvider(Wavein2.WaveFormat).ToSampleProvider();
        var wo = new WaveOutEvent();
        wo.DeviceNumber = SdeviceNumber2;
        wo.Init(silence);
        wo.Play();
        Wavein2.DataAvailable += new EventHandler<NAudio.Wave.WaveInEventArgs>(SsourceStream2_DataAvailable);
        Wavein2.StartRecording();
        SRecFlag2 = true;
    }
}
private void SsourceStream_DataAvailable(object sender, NAudio.Wave.WaveInEventArgs e)
{
    if (!SRecFlag) return;
    if (Wavein.WaveFormat.BitsPerSample == 32)
    {
        #region NewStraightConvert
        // 32-bit float capture: convert to 16-bit PCM, resample to 8 kHz stereo, then down to mono, and buffer the raw bytes
        using (RawSourceWaveStream RS = new RawSourceWaveStream(e.Buffer, 0, e.BytesRecorded, WaveFormat.CreateIeeeFloatWaveFormat(Wavein.WaveFormat.SampleRate, Wavein.WaveFormat.Channels)))
        using (Wave32To16Stream wav16 = new Wave32To16Stream(RS))
        using (MemoryStream ms2 = new MemoryStream())
        {
            WaveFileWriter.WriteWavFileToStream(ms2, wav16);
            ms2.Position = 0;
            using (var reader = new WaveFileReader(ms2))
            using (var conversionStream0 = new WaveFormatConversionStream(pcm8k16bitSt, reader))
            using (var conversionStream1 = new WaveFormatConversionStream(pcm8k16bitMo, conversionStream0))
            //using (var conversionStream2 = new WaveFormatConversionStream(muLaw8k8bit, conversionStream1))
            {
                byte[] SendingBytes;
                using (MemoryStream ms3 = new MemoryStream())
                {
                    using (RawSourceWaveStream cc = new RawSourceWaveStream(conversionStream1, pcm8k16bitMo))
                    {
                        cc.Position = 0;
                        cc.CopyTo(ms3);
                        SendingBytes = ms3.ToArray();
                    }
                }
                if (SendingBytes.Length > 0)
                {
                    //SslTcpClient.VociDataToSendST2.AddRange(SendingBytes);
                    SBL.AddRange(SendingBytes);
                }
            }
        }
        #endregion
        return;
    }
    else
    {
        // 16-bit capture: wrap the raw buffer in a WAV, resample to 8 kHz mono,
        // then append everything after the "data" chunk header to the byte list
        byte[] outtovoci;
        Bit16 = new WaveFormat(Wavein.WaveFormat.SampleRate, 16, Wavein.WaveFormat.Channels);
        outtovoci = new byte[e.BytesRecorded];
        Array.Copy(e.Buffer, 0, outtovoci, 0, e.BytesRecorded);
        using (MemoryStream TESTWaveMS = new MemoryStream())
        {
            using (MemoryStream TESTWaveMS2 = new MemoryStream())
            {
                using (WaveFileWriter TESTwaveWriter = new WaveFileWriter(TESTWaveMS, Bit16))
                {
                    TESTwaveWriter.Write(outtovoci, 0, outtovoci.Length);
                    TESTwaveWriter.Flush();
                    byte[] tbytes = TESTWaveMS.ToArray();
                    using (MemoryStream tstream = new MemoryStream(tbytes))
                    {
                        using (var reader = new WaveFileReader(tstream))
                        using (var conversionStream0 = new WaveFormatConversionStream(pcm8k16bitSt, reader))
                        using (var conversionStream1 = new WaveFormatConversionStream(pcm8k16bitMo, conversionStream0))
                        //using (var conversionStream2 = new WaveFormatConversionStream(muLaw8k8bit, conversionStream1))
                        {
                            WaveFileWriter.WriteWavFileToStream(TESTWaveMS2, conversionStream1);
                            byte[] tbytes2 = TESTWaveMS2.ToArray();
                            int fPos = SearchBytes(tbytes2, DataHeader);
                            if (fPos > 0)
                            {
                                fPos = fPos + 8;
                            }
                            else
                            {
                                fPos = 0;
                            }
                            long SendingBytes = tbytes2.Length - fPos;
                            byte[] WBack = new byte[SendingBytes];
                            if (SendingBytes > 0)
                            {
                                Array.Copy(tbytes2, fPos, WBack, 0, SendingBytes);
                                //SslTcpClient.VociDataToSendST2.AddRange(WBack);
                                SBL.AddRange(WBack);
                            }
                        }
                    }
                }
            }
        }
    }
}
// Same processing as SsourceStream_DataAvailable, but for the second capture device (appends to SBL2)
private void SsourceStream2_DataAvailable(object sender, NAudio.Wave.WaveInEventArgs e)
{
    if (!SRecFlag2) return;
    if (Wavein2.WaveFormat.BitsPerSample == 32)
    {
        #region NewStraightConvert
        using (RawSourceWaveStream RS = new RawSourceWaveStream(e.Buffer, 0, e.BytesRecorded, WaveFormat.CreateIeeeFloatWaveFormat(Wavein2.WaveFormat.SampleRate, Wavein2.WaveFormat.Channels)))
        using (Wave32To16Stream wav16 = new Wave32To16Stream(RS))
        using (MemoryStream ms2 = new MemoryStream())
        {
            WaveFileWriter.WriteWavFileToStream(ms2, wav16);
            ms2.Position = 0;
            using (var reader = new WaveFileReader(ms2))
            using (var conversionStream0 = new WaveFormatConversionStream(pcm8k16bitSt, reader))
            using (var conversionStream1 = new WaveFormatConversionStream(pcm8k16bitMo, conversionStream0))
            //using (var conversionStream2 = new WaveFormatConversionStream(muLaw8k8bit, conversionStream1))
            {
                byte[] SendingBytes;
                using (MemoryStream ms3 = new MemoryStream())
                {
                    using (RawSourceWaveStream cc = new RawSourceWaveStream(conversionStream1, pcm8k16bitMo))
                    {
                        cc.Position = 0;
                        cc.CopyTo(ms3);
                        SendingBytes = ms3.ToArray();
                    }
                }
                if (SendingBytes.Length > 0)
                {
                    //SslTcpClient.VociDataToSendST2.AddRange(SendingBytes);
                    SBL2.AddRange(SendingBytes);
                }
            }
        }
        #endregion
        return;
    }
    else
    {
        byte[] outtovoci;
        Bit16 = new WaveFormat(Wavein2.WaveFormat.SampleRate, 16, Wavein2.WaveFormat.Channels);
        outtovoci = new byte[e.BytesRecorded];
        Array.Copy(e.Buffer, 0, outtovoci, 0, e.BytesRecorded);
        using (MemoryStream TESTWaveMS = new MemoryStream())
        {
            using (MemoryStream TESTWaveMS2 = new MemoryStream())
            {
                using (WaveFileWriter TESTwaveWriter = new WaveFileWriter(TESTWaveMS, Bit16))
                {
                    TESTwaveWriter.Write(outtovoci, 0, outtovoci.Length);
                    TESTwaveWriter.Flush();
                    byte[] tbytes = TESTWaveMS.ToArray();
                    using (MemoryStream tstream = new MemoryStream(tbytes))
                    {
                        using (var reader = new WaveFileReader(tstream))
                        using (var conversionStream0 = new WaveFormatConversionStream(pcm8k16bitSt, reader))
                        using (var conversionStream1 = new WaveFormatConversionStream(pcm8k16bitMo, conversionStream0))
                        //using (var conversionStream2 = new WaveFormatConversionStream(muLaw8k8bit, conversionStream1))
                        {
                            WaveFileWriter.WriteWavFileToStream(TESTWaveMS2, conversionStream1);
                            byte[] tbytes2 = TESTWaveMS2.ToArray();
                            int fPos = SearchBytes(tbytes2, DataHeader);
                            if (fPos > 0)
                            {
                                fPos = fPos + 8;
                            }
                            else
                            {
                                fPos = 0;
                            }
                            long SendingBytes = tbytes2.Length - fPos;
                            byte[] WBack = new byte[SendingBytes];
                            if (SendingBytes > 0)
                            {
                                Array.Copy(tbytes2, fPos, WBack, 0, SendingBytes);
                                //SslTcpClient.VociDataToSendST2.AddRange(WBack);
                                SBL2.AddRange(WBack);
                            }
                        }
                    }
                }
            }
        }
    }
}
private async void timer3_Tick(object sender, EventArgs e)
{
    timer3.Enabled = false;
    if (SRecFlag == true || SRecFlag2 == true)
    {
        await Task.Run(() => SyncSpeakers());
    }
    timer3.Interval = 20000;
    timer3.Enabled = true;
}
// Runs on the timer: pulls equal-length chunks out of SBL/SBL2, mixes them,
// compresses the result, and stores it in the dictionary keyed by UTC time
private static void SyncSpeakers()
{
    MemoryStream ms = new MemoryStream();
    MemoryStream ms2 = new MemoryStream();
    WaveFileReader reader = null;
    WaveFileReader reader2 = null;
    MixingSampleProvider mixer = null;
    int lbc = SBL.Count();
    int lbc2 = SBL2.Count();
    int lowest = 0;
    int[] array = new int[] { lbc, lbc2 };
    lowest = array.Where(f => f > 0).Min();
    if (deviceToRecord != null && SBL.Count > 0)
    {
        chunkdata = new byte[lowest];
        Array.Copy(SBL.ToArray(), 0, chunkdata, 0, chunkdata.Length);
        SwaveWriterS1 = new NAudio.Wave.WaveFileWriter(ms, pcm8k16bitMo);
        SwaveWriterS1.Write(chunkdata, 0, chunkdata.Length);
        SwaveWriterS1.Flush();
        SBL.RemoveRange(0, lowest);
    }
    if (deviceToRecord2 != null && SBL2.Count > 0)
    {
        chunkdata2 = new byte[lowest];
        Array.Copy(SBL2.ToArray(), 0, chunkdata2, 0, chunkdata2.Length);
        SwaveWriterS2 = new NAudio.Wave.WaveFileWriter(ms2, pcm8k16bitMo);
        SwaveWriterS2.Write(chunkdata2, 0, chunkdata2.Length);
        SwaveWriterS2.Flush();
        SBL2.RemoveRange(0, lowest);
    }
    int SWaves = 0;
    if (Wavein != null && SRecFlag == true)
    {
        ms.Position = 0;
        reader = new WaveFileReader(ms);
        SWaves++;
    }
    if (Wavein2 != null && SRecFlag2 == true)
    {
        ms2.Position = 0;
        reader2 = new WaveFileReader(ms2);
        SWaves++;
    }
    if (SWaves == 1)
    {
        mixer = new MixingSampleProvider(new[] { reader.ToSampleProvider() });
    }
    else if (SWaves == 2)
    {
        mixer = new MixingSampleProvider(new[] { reader.ToSampleProvider(), reader2.ToSampleProvider() });
    }
    if (SWaves > 0)
    {
        using (MemoryStream lms = new MemoryStream())
        {
            WaveFileWriter.WriteWavFileToStream(lms, mixer.ToWaveProvider16());
            byte[] SendingBytes;
            using (MemoryStream ms35 = new MemoryStream())
            {
                using (RawSourceWaveStream cc = new RawSourceWaveStream(lms, pcm8k16bitMo))
                {
                    cc.Position = 0;
                    cc.CopyTo(ms35);
                    SendingBytes = ms35.ToArray();
                    SwaveWriter.Write(SendingBytes, 0, SendingBytes.Length);
                    SwaveWriter.Flush();
                    byte[] lByte = Compress(SendingBytes);
                    DataFrame aFrame2 = new DataFrame();
                    aFrame2.bytes = new byte[lByte.Length];
                    aFrame2.bytesRecorded = SendingBytes.Length;
                    lByte.CopyTo(aFrame2.bytes, 0);
                    AllSpeakerBytes.TryAdd(DateTime.UtcNow, aFrame2);
                    lByte = null;
                }
            }
        }
    }
}
internal static byte[] Compress(byte[] data)
{
    try
    {
        //return data;
        using (MemoryStream output = new MemoryStream())
        {
            using (DeflateStream dstream = new DeflateStream(output, CompressionLevel.Optimal))
            {
                dstream.Write(data, 0, data.Length);
            }
            return output.ToArray();
        }
    }
    catch (Exception ERR5)
    {
        //Logging.LogData($"Failure in Compress: {ERR5.ToString()}");
        return null;
    }
}
internal static byte[] Decompress(byte[] data)
{
    //return data;
    try
    {
        using (MemoryStream output = new MemoryStream())
        {
            using (MemoryStream input = new MemoryStream(data))
            {
                using (DeflateStream dstream = new DeflateStream(input, CompressionMode.Decompress))
                {
                    dstream.CopyTo(output);
                }
            }
            return output.ToArray();
        }
    }
    catch (Exception ERR5)
    {
        //Logging.LogData($"Failure in Decompress: {ERR5.ToString()}");
        return null;
    }
}
class DataFrame
{
    public byte[] bytes { get; set; }
    public int bytesRecorded { get; set; }
    public string extraData { get; set; } = "";
}
// Stop both captures, then decompress every stored chunk in time order and write it out as one MP3
private void btnStop_Click(object sender, EventArgs e)
{
    if (Wavein != null)
    {
        Wavein.StopRecording();
        Wavein.Dispose();
        Wavein = null;
    }
    if (Wavein2 != null)
    {
        Wavein2.StopRecording();
        Wavein2.Dispose();
        Wavein2 = null;
    }
    mp3WriterAllSpk = new NAudio.Lame.LameMP3FileWriter(FinalSRFilename, pcm8k16bitMo, 64);
    List<DateTime> SpkWrites = AllSpeakerBytes.Select(k => k.Key).OrderBy(c => c).ToList();
    foreach (DateTime DT in SpkWrites)
    {
        byte[] outgoing = Decompress(AllSpeakerBytes[DT].bytes);
        mp3WriterAllSpk.Write(outgoing, 0, AllSpeakerBytes[DT].bytesRecorded);
    }
    mp3WriterAllSpk.Dispose();
    SRecFlag = false;
    SRecFlag2 = false;
}
This code needs a thorough clean-up, but in short: SyncSpeakers() runs every 20 seconds, and I hear the audio clip at 20-second increments. If I change the timer to fire every 10 seconds, I hear the clip every 10 seconds in the resulting audio. Any ideas?
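In case it helps to narrow things down, here is a minimal diagnostic sketch I'm considering adding (the helper name, the temp-file path, and the call site are placeholders of mine, not part of the code above): dump each mixed chunk that SyncSpeakers() produces to its own WAV file, so I can tell whether the clip sits inside a chunk or at the seam where two chunks meet.

private static int debugChunkIndex = 0;

// Hypothetical helper: writes one mixed chunk to its own WAV file so the seams
// between consecutive chunks can be inspected in an audio editor.
private static void DumpChunkForDebug(byte[] chunk)
{
    string path = Path.Combine(Path.GetTempPath(), $"speaker_chunk_{debugChunkIndex++}.wav");
    using (var writer = new WaveFileWriter(path, pcm8k16bitMo))
    {
        writer.Write(chunk, 0, chunk.Length);
    }
}

Calling DumpChunkForDebug(SendingBytes) right after the mix in SyncSpeakers() would produce one file per timer tick.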