Recording code
This is my code for recording audio from the microphone:
import pyaudio
import wave

# recording parameters
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 1024
RECORD_SECONDS = 10

name = input("What is the name of the file : ")
WAVE_OUTPUT_FILENAME = name + ".wav"

audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE, input=True,
                    frames_per_buffer=CHUNK)

print("recording...")
frames = []
# read RATE / CHUNK chunks per second, for RECORD_SECONDS seconds
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)
print("finished recording")

stream.stop_stream()
stream.close()
audio.terminate()

# write the buffered chunks to a WAV file
waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(audio.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(frames))
waveFile.close()
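To check the output, the saved file can be played straight back. Here is a minimal playback snippet, a sketch following the standard PyAudio playback pattern, reusing WAVE_OUTPUT_FILENAME and CHUNK from above:

import pyaudio
import wave

wf = wave.open(WAVE_OUTPUT_FILENAME, 'rb')
p = pyaudio.PyAudio()
# open an output stream matching the WAV file's own parameters
out = p.open(format=p.get_format_from_width(wf.getsampwidth()),
             channels=wf.getnchannels(),
             rate=wf.getframerate(),
             output=True)
data = wf.readframes(CHUNK)
while data:
    out.write(data)
    data = wf.readframes(CHUNK)
out.stop_stream()
out.close()
p.terminate()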
AudioSpectrum code
I also made this AudioSpectrum so that while I am recording audio, the waveform and spectrum are displayed as I speak into the microphone:
import struct
import time

import matplotlib.pyplot as plt
import numpy as np
import pyaudio
from scipy.fftpack import fft
class AudioStream(object):
    def __init__(self):
        # stream constants
        self.CHUNK = 1024
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 44100
        self.pause = False

        # stream object
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            format=self.FORMAT,
            channels=self.CHANNELS,
            rate=self.RATE,
            input=True,
            output=True,
            frames_per_buffer=self.CHUNK,
        )
        self.init_plots()
        self.start_plot()

    def init_plots(self):
        # x variables for plotting
        x = np.arange(0, 2 * self.CHUNK, 2)
        xf = np.linspace(0, self.RATE, self.CHUNK)

        # create matplotlib figure and axes
        self.fig, (ax1, ax2) = plt.subplots(2, figsize=(15, 7))
        self.fig.canvas.mpl_connect('button_press_event', self.onClick)

        # create a line object with random data
        self.line, = ax1.plot(x, np.random.rand(self.CHUNK), '-', lw=2)

        # create semilogx line for spectrum
        self.line_fft, = ax2.semilogx(
            xf, np.random.rand(self.CHUNK), '-', lw=2)

        # format waveform axes
        ax1.set_title('AUDIO WAVEFORM')
        ax1.set_xlabel('samples')
        ax1.set_ylabel('volume')
        ax1.set_ylim(0, 255)
        ax1.set_xlim(0, 2 * self.CHUNK)
        plt.setp(
            ax1, yticks=[0, 128, 255],
            xticks=[0, self.CHUNK, 2 * self.CHUNK],
        )
        plt.setp(ax2, yticks=[0, 1])

        # format spectrum axes
        ax2.set_xlim(20, self.RATE / 2)

        # show axes
        plt.show(block=False)

    def start_plot(self):
        print('stream started')
        frame_count = 0
        start_time = time.time()

        while not self.pause:
            data = self.stream.read(self.CHUNK)
            # unpack the 2 * CHUNK raw bytes, keep every second byte,
            # and re-centre the values around 128 for the 0..255 axis
            data_int = struct.unpack(str(2 * self.CHUNK) + 'B', data)
            data_np = np.array(data_int, dtype='b')[::2] + 128
            self.line.set_ydata(data_np)

            # compute FFT and update line
            yf = fft(data_int)
            self.line_fft.set_ydata(
                np.abs(yf[0:self.CHUNK]) / (128 * self.CHUNK))

            # update figure canvas
            self.fig.canvas.draw()
            self.fig.canvas.flush_events()
            frame_count += 1
        else:
            self.fr = frame_count / (time.time() - start_time)
            print('average frame rate = {:.0f} FPS'.format(self.fr))
            self.exit_app()

    def exit_app(self):
        print('stream closed')
        self.p.close(self.stream)

    def onClick(self, event):
        self.pause = True


if __name__ == '__main__':
    AudioStream()
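A note on the decoding step in start_plot(): with paInt16 each sample is two bytes, so one read() of CHUNK frames returns 2 * CHUNK raw bytes; the struct.unpack line views them as unsigned bytes, keeps every second byte, and re-centres the values around 128 to fit the 0..255 axis. An alternative decode I have seen (a sketch, not part of the tutorial code; it would need the y-axis limits changed to the int16 range) reads the buffer directly as 16-bit samples:

import numpy as np

# view the raw CHUNK-frame buffer as signed 16-bit samples
samples = np.frombuffer(data, dtype=np.int16)
# 'samples' is in -32768..32767, so the waveform axis would need
# ax1.set_ylim(-32768, 32767) instead of the 0..255 range above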
Finally, this is my attempt to combine the two scripts together.
**However, I get this error:**
return pa.read_stream(self._stream, num_frames, exception_on_overflow)
OSError: [Errno -9988] Stream closed
import struct
import time
import wave

import matplotlib.pyplot as plt
import numpy as np
import pyaudio
from scipy.fftpack import fft
class AudioStream(object):
    def __init__(self):
        # stream constants
        self.CHUNK = 44100
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 44100
        self.pause = False
        self.name = input("What is the name of the file : ")
        self.WAVE_OUTPUT_FILENAME = self.name + ".wav"
        self.RECORD_SECONDS = 10

        # stream object
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            format=self.FORMAT,
            channels=self.CHANNELS,
            rate=self.RATE,
            input=True,
            output=True,
            frames_per_buffer=self.CHUNK,
        )
        self.init_plots()
        # NOTE: start_plot() only returns after onClick sets self.pause,
        # and it ends by calling exit_app(), which closes the stream;
        # record() below then reads from a stream that is already closed
        self.start_plot()
        self.record()
        self.stop_recording()

    def init_plots(self):
        # x variables for plotting
        x = np.arange(0, 2 * self.CHUNK, 2)
        xf = np.linspace(0, self.RATE, self.CHUNK)

        # create matplotlib figure and axes
        self.fig, (ax1, ax2) = plt.subplots(2, figsize=(15, 7))
        self.fig.canvas.mpl_connect('button_press_event', self.onClick)

        # create a line object with random data
        self.line, = ax1.plot(x, np.random.rand(self.CHUNK), '-', lw=2)

        # create semilogx line for spectrum
        self.line_fft, = ax2.semilogx(
            xf, np.random.rand(self.CHUNK), '-', lw=2)

        # format waveform axes
        ax1.set_title('AUDIO WAVEFORM')
        ax1.set_xlabel('samples')
        ax1.set_ylabel('volume')
        ax1.set_ylim(0, 255)
        ax1.set_xlim(0, 2 * self.CHUNK)
        plt.setp(
            ax1, yticks=[0, 128, 255],
            xticks=[0, self.CHUNK, 2 * self.CHUNK],
        )
        plt.setp(ax2, yticks=[0, 1])

        # format spectrum axes
        ax2.set_xlim(20, self.RATE / 2)

        # show axes
        plt.show(block=False)

    def start_plot(self):
        print('stream started')
        frame_count = 0
        start_time = time.time()

        while not self.pause:
            data = self.stream.read(self.CHUNK)
            data_int = struct.unpack(str(2 * self.CHUNK) + 'B', data)
            data_np = np.array(data_int, dtype='b')[::2] + 128
            self.line.set_ydata(data_np)

            # compute FFT and update line
            yf = fft(data_int)
            self.line_fft.set_ydata(
                np.abs(yf[0:self.CHUNK]) / (128 * self.CHUNK))

            # update figure canvas
            self.fig.canvas.draw()
            self.fig.canvas.flush_events()
            frame_count += 1
        else:
            self.fr = frame_count / (time.time() - start_time)
            print('average frame rate = {:.0f} FPS'.format(self.fr))
            self.exit_app()

    def exit_app(self):
        print('stream closed')
        self.p.close(self.stream)

    def onClick(self, event):
        self.pause = True

    def record(self):
        print("recording")
        self.frames = []
        for i in range(0, int(self.RATE / self.CHUNK * self.RECORD_SECONDS)):
            # this is the read that raises OSError: [Errno -9988] Stream closed
            data = self.stream.read(self.CHUNK, exception_on_overflow=False)
            self.frames.append(data)
        print("finished recording")

    def stop_recording(self):
        print("Stopped")
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()

        waveFile = wave.open(self.WAVE_OUTPUT_FILENAME, 'wb')
        waveFile.setnchannels(self.CHANNELS)
        waveFile.setsampwidth(self.p.get_sample_size(self.FORMAT))
        waveFile.setframerate(self.RATE)
        waveFile.writeframes(b''.join(self.frames))
        waveFile.close()


if __name__ == '__main__':
    AudioStream()
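From the traceback, my current guess is that the call order in __init__ is the problem: start_plot() only returns after the click handler sets self.pause, and it ends by calling exit_app(), which closes the stream, so the later record() call reads from a closed stream. Here is a minimal self-contained sketch of the structure I think should work instead (waveform only, FFT omitted for brevity; all names here are my own, not from the tutorials): buffer the frames inside the plot loop, write the WAV file, and only then close the stream.

import matplotlib.pyplot as plt
import numpy as np
import pyaudio
import wave

CHUNK = 1024
RATE = 44100
CHANNELS = 1
FORMAT = pyaudio.paInt16

filename = input("What is the name of the file : ") + ".wav"

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                input=True, frames_per_buffer=CHUNK)

# live waveform plot; clicking the figure stops the recording
fig, ax = plt.subplots()
line, = ax.plot(np.zeros(CHUNK))
ax.set_ylim(-32768, 32767)
state = {'pause': False}
fig.canvas.mpl_connect('button_press_event',
                       lambda event: state.update(pause=True))
plt.show(block=False)

frames = []
while not state['pause']:
    data = stream.read(CHUNK)
    frames.append(data)  # buffer the raw bytes for the WAV file
    line.set_ydata(np.frombuffer(data, dtype=np.int16))
    fig.canvas.draw()
    fig.canvas.flush_events()

# write the WAV file first, while nothing has been closed yet
wf = wave.open(filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()

# only now shut the stream down
stream.stop_stream()
stream.close()
p.terminate()
print('saved', filename)

Does this ordering look right, or is there a cleaner way to keep a separate record() method?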
Sorry this is so long, but please help me with this project; I have searched the internet for hours and hours and could not find anything.
Please tell me what I need to fix when connecting the two scripts.