Showing 12 changed files with 463 additions and 104 deletions
calc_decibel.py 0 → 100644
+import pyaudio
+import numpy as np
+
+# Define constants
+CHUNK_SIZE = 1024         # Number of audio frames per buffer
+FORMAT = pyaudio.paInt16  # Audio format (16-bit int)
+CHANNELS = 1              # Mono audio
+RATE = 44100              # Sample rate (Hz)
+RMS_REF = 1.0             # Reference RMS amplitude for dB calculation
+
+# Initialize PyAudio stream
+audio = pyaudio.PyAudio()
+stream = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True,
+                    frames_per_buffer=CHUNK_SIZE)
+
+# Main loop
+while True:
+    # Read audio data from the stream
+    data = stream.read(CHUNK_SIZE)
+
+    # Convert the audio data to a numpy array
+    audio_array = np.frombuffer(data, dtype=np.int16)
+
+    # Calculate the RMS amplitude (cast to float to avoid int16 overflow when squaring)
+    rms = np.sqrt(np.mean(np.square(audio_array.astype(np.float64))))
+
+    # Calculate the average and peak dB levels relative to RMS_REF
+    avg_db = 20 * np.log10(rms / RMS_REF)
+    max_db = 20 * np.log10(np.max(np.abs(audio_array)) / RMS_REF)
+
+    # Print the dB level to the console, skipping silent or invalid frames
+    try:
+        if np.isfinite(avg_db) and np.isfinite(max_db) and max_db > 50:
+            print("dB level:", int(avg_db), int(max_db))
+    except (ValueError, OverflowError):
+        continue
\ No newline at end of file
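A note on the numbers calc_decibel.py prints: with RMS_REF = 1.0 the values are dB relative to one int16 count, so a full-scale sine reads about +87 dB rather than 0 dBFS. A minimal offline sketch of the same RMS-to-dB conversion, referenced to int16 full scale instead; the 440 Hz test tone and the FULL_SCALE constant are illustrative assumptions, not part of the commit:

import numpy as np

RATE = 44100
FULL_SCALE = 32768.0  # int16 full-scale reference (assumption, not in the commit)

# One second of a half-amplitude 440 Hz sine encoded as int16
t = np.arange(RATE) / RATE
samples = (0.5 * FULL_SCALE * np.sin(2 * np.pi * 440 * t)).astype(np.int16)

# Same RMS computation as calc_decibel.py, cast to float to avoid int16 overflow
rms = np.sqrt(np.mean(np.square(samples.astype(np.float64))))

# dB relative to full scale; a 0.5-amplitude sine sits near -9 dBFS
db_fs = 20 * np.log10(rms / FULL_SCALE)
print(f"RMS: {rms:.1f}, level: {db_fs:.1f} dBFS")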
callBackWire.py deleted 100644 → 0
1 | -"""PyAudio Example: Audio wire between input and output. Callback version.""" | ||
2 | - | ||
3 | -import time | ||
4 | -import sys | ||
5 | -import pyaudio | ||
6 | -import numpy as np | ||
7 | - | ||
8 | - | ||
9 | -DURATION = 5 # seconds | ||
10 | - | ||
11 | -delay_buffer = np.zeros((44100, 2), dtype=np.float32) | ||
12 | - | ||
13 | -def callback(in_data, frame_count, time_info, status): | ||
14 | - global delay_buffer | ||
15 | - audio_data = np.frombuffer(in_data, dtype=np.float32).reshape(frame_count, 2) | ||
16 | - delayed_data = np.concatenate((delay_buffer, audio_data)) | ||
17 | - delay_buffer = delayed_data[frame_count:] | ||
18 | - return (audio_data + 0.5 * delay_buffer).tobytes(), pyaudio.paContinue | ||
19 | - | ||
20 | -p = pyaudio.PyAudio() | ||
21 | -stream = p.open(format=p.get_format_from_width(2), | ||
22 | - channels=1, | ||
23 | - rate=44100, | ||
24 | - input=True, | ||
25 | - output=True, | ||
26 | - frames_per_buffer=1024, | ||
27 | - | ||
28 | - stream_callback=callback) | ||
29 | - | ||
30 | -start = time.time() | ||
31 | -while stream.is_active() and (time.time() - start) < DURATION: | ||
32 | - time.sleep(0.1) | ||
33 | - | ||
34 | -stream.close() | ||
35 | -p.terminate() | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
find_peak.py 0 → 100644
+import pyaudio
+import numpy as np
+import scipy.signal as signal
+import time
+
+# Parameters
+RATE = 44100      # Sampling rate (Hz)
+CHUNK = 1024      # Number of samples to read per buffer
+THRESHOLD = 256   # Threshold for peak detection
+WIN_SIZE = 1024   # Window size for the STFT
+HOP_SIZE = 512    # Overlap between adjacent STFT windows
+
+# Create the PyAudio object
+p = pyaudio.PyAudio()
+
+# Callback definition
+def process_audio(in_data, frame_count, time_info, status):
+    # Convert the raw audio bytes to a numpy array
+    data = np.frombuffer(in_data, dtype=np.int16)
+
+    # Compute the STFT
+    f, t, Zxx = signal.stft(data, RATE, nperseg=WIN_SIZE, noverlap=HOP_SIZE)
+
+    # Detect peaks in the time-averaged spectrum
+    peaks, _ = signal.find_peaks(np.abs(np.mean(Zxx, axis=1)), height=THRESHOLD, distance=WIN_SIZE)
+
+    # Estimate parameters from the first detected peak
+    if len(peaks) > 0:
+        peak_idx = peaks[0]                      # Pick the first peak
+        height = np.abs(Zxx[peak_idx, 0])        # Estimated peak height
+        freq = f[peak_idx]                       # Estimated peak frequency
+        amp = np.max(np.abs(data))               # Estimated signal amplitude
+        progress = (peak_idx + HOP_SIZE) / RATE  # Rough estimate of the impact-sound duration
+
+        # Print the results
+        print("Height: {:.2f}, Frequency: {:.2f}, Amplitude: {:.2f}, Progress: {:.2f}".format(height, freq, amp, progress))
+
+    # Pass the input through unchanged
+    return (in_data, pyaudio.paContinue)
+
+# Open the input stream
+stream = p.open(format=p.get_format_from_width(2),
+                channels=1,
+                rate=RATE,
+                input=True,
+                output=True,
+                frames_per_buffer=CHUNK,
+                stream_callback=process_audio)
+
+# Start the stream
+stream.start_stream()
+
+# Wait until the program is stopped
+while stream.is_active():
+    time.sleep(0.1)
+
+# Close the stream and terminate the PyAudio object
+stream.stop_stream()
+stream.close()
+p.terminate()
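As a quick way to sanity-check the STFT-plus-find_peaks logic in find_peak.py offline, the same search can be run on a synthetic tone. This sketch averages frame magnitudes (rather than complex frames, to avoid phase cancellation) and uses an illustrative relative threshold; the 1 kHz test tone is an assumption:

import numpy as np
import scipy.signal as signal

RATE = 44100
WIN_SIZE = 1024
HOP_SIZE = 512

# 0.2 s test signal: a 1 kHz tone plus mild noise
t = np.arange(int(0.2 * RATE)) / RATE
x = 2000 * np.sin(2 * np.pi * 1000 * t) + 50 * np.random.randn(len(t))

# STFT as in find_peak.py, then a peak search on the averaged magnitude spectrum
f, _, Zxx = signal.stft(x, RATE, nperseg=WIN_SIZE, noverlap=HOP_SIZE)
spectrum = np.mean(np.abs(Zxx), axis=1)
peaks, _ = signal.find_peaks(spectrum, height=0.5 * np.max(spectrum))

for idx in peaks:
    print("peak at {:.1f} Hz, height {:.2f}".format(f[idx], spectrum[idx]))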
make_echo.py 0 → 100644
+import sys
+import time
+import numpy as np
+import pyaudio
+
+RECORD_SECONDS = 5
+CHUNK = 128
+RATE = 44100
+DELAY = 0.1  # Delay time in seconds
+GAIN = 1.0   # Echo gain (0 to 1)
+
+# Create buffer for delayed audio data
+buffer_size = int(RATE * DELAY)
+buffer = np.zeros(buffer_size, dtype=np.int16)
+
+def add_echo(in_data, frame_count, time_info, status_flags):
+    global buffer
+    data = np.frombuffer(in_data, dtype=np.int16)
+    # Mix the current frame with the delayed signal (float math avoids int16 overflow)
+    output = data.astype(np.float32) + GAIN * buffer[:len(data)]
+    buffer = np.roll(buffer, len(data))
+    buffer[-len(data):] = data
+    return (np.clip(output, -32768, 32767).astype(np.int16).tobytes(), pyaudio.paContinue)
+
+
+p = pyaudio.PyAudio()
+stream = p.open(format=p.get_format_from_width(2),
+                channels=1 if sys.platform == 'darwin' else 2,
+                rate=RATE,
+                input=True,
+                output=True,
+                frames_per_buffer=CHUNK,
+                stream_callback=add_echo)
+
+print('* recording')
+
+stream.start_stream()
+
+while stream.is_active():
+    # Do other processing here if necessary
+    time.sleep(0.1)
+
+stream.stop_stream()
+stream.close()
+p.terminate()
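Functionally, the add_echo callback in make_echo.py mixes each incoming frame with audio captured roughly DELAY seconds earlier, i.e. a feed-forward echo y[n] = x[n] + GAIN * x[n - D]. A file-free sketch of the same effect applied to a NumPy array; the 440 Hz test tone is an illustrative assumption:

import numpy as np

RATE = 44100
DELAY = 0.1   # seconds
GAIN = 0.5    # echo gain

def echo(x, rate=RATE, delay=DELAY, gain=GAIN):
    """Feed-forward echo: y[n] = x[n] + gain * x[n - delay*rate]."""
    d = int(rate * delay)
    y = x.astype(np.float32).copy()
    y[d:] += gain * x[:-d].astype(np.float32)
    return np.clip(y, -32768, 32767).astype(np.int16)

# Half a second of a 440 Hz tone as int16, echoed once
t = np.arange(int(0.5 * RATE)) / RATE
tone = (10000 * np.sin(2 * np.pi * 440 * t)).astype(np.int16)
out = echo(tone)
print(out.dtype, len(out))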
output.wav deleted 100644 → 0 (binary file, no preview)
sets.py 0 → 100644
+task_list = []
+
+
+def display_menu():
+    print("Task Manager")
+    print("1. Add task")
+    print("2. View tasks")
+    print("3. Mark task as complete")
+    print("4. Exit")
+
+
+def add_task():
+    title = input("Enter task title: ")
+    description = input("Enter task description: ")
+    status = "in progress"
+    task = {"title": title, "description": description, "status": status}
+    task_list.append(task)
+    print("Task added.")
+
+
+def view_tasks():
+    if not task_list:
+        print("The task list is empty.")
+    else:
+        print()
+        print("Task list:")
+        print("----------------")
+        for task in task_list:
+            print(f"Title: {task['title']}")
+            print(f"Description: {task['description']}")
+            print(f"Status: {task['status']}")
+            print("----------------")
+
+
+def mark_task_complete():
+    if not task_list:
+        print("The task list is empty.")
+        return
+
+    title = input("Enter the title of the task to mark as complete: ")
+    for task in task_list:
+        if task['title'] == title:
+            task['status'] = "complete"
+            print("Task marked as complete.")
+            return
+
+    print("No task with that title was found.")
+
+
+while True:
+    display_menu()
+    choice = input("Choice: ")
+
+    if choice == "1":
+        add_task()
+    elif choice == "2":
+        view_tasks()
+    elif choice == "3":
+        mark_task_complete()
+    elif choice == "4":
+        print("Exiting the program.")
+        break
+    else:
+        print("Please enter a valid option.")
+    print()
+
streamToOut.py deleted 100644 → 0
-import pyaudio
-from pydub import AudioSegment
-from pydub.effects import normalize
-
-# set up PyAudio
-pa = pyaudio.PyAudio()
-stream = pa.open(format=pyaudio.paInt16,
-                 channels=1,
-                 rate=44100,
-                 input=True,
-                 frames_per_buffer=1024)
-
-# record some audio from the microphone
-audio_data = []
-for i in range(0, int(44100 / 1024 * 5)):
-    data = stream.read(1024)
-    audio_data.append(data)
-
-# convert the audio data to a PyDub audio segment
-audio_segment = AudioSegment(
-    data=b''.join(audio_data),
-    sample_width=2,
-    frame_rate=44100,
-    channels=1
-)
-
-# apply an echo effect to the audio segment
-echoed_segment = normalize(audio_segment)
-
-# save the output audio file
-echoed_segment.export("output.mp3", format="mp3")
-
-# clean up
-stream.stop_stream()
-stream.close()
-pa.terminate()
stream_echo.py 0 → 100644
+import sys
+import time
+import numpy as np
+import pyaudio
+import librosa
+
+RECORD_SECONDS = 5
+CHUNK = 1024
+RATE = 44100
+DELAY = 0.1   # Delay time in seconds
+GAIN = 1      # Echo gain (0 to 1)
+MAX_FREQ = 3000
+
+# Create buffer for delayed audio data
+buffer_size = int(RATE * DELAY)
+buffer = np.zeros(buffer_size, dtype=np.int16)
+
+def add_echo(in_data, frame_count, time_info, status_flags):
+    global buffer
+    data = np.frombuffer(in_data, dtype=np.int16)
+
+    def get_max_average_db(data):
+        data_float = data.astype(np.float32)
+
+        # Compute the power spectrogram of the data
+        S = librosa.stft(data_float, n_fft=2048, hop_length=512)
+        S_power = np.abs(S)**2
+
+        # Convert the power spectrogram to the dB scale
+        S_dB = librosa.power_to_db(S_power, ref=np.max)
+
+        # Calculate the average and maximum dB levels
+        avg_dB = np.mean(S_dB)
+        max_dB = np.max(S_dB)
+
+        return avg_dB, max_dB
+
+    def get_dominant_freq(data):
+        data = data.astype(np.float32) / 32768.0
+
+        # Compute the Fourier transform of the data
+        fft_data = np.fft.fft(data)
+
+        # Compute the power spectral density of the data
+        psd_data = np.abs(fft_data)**2
+
+        # Define the frequency range of interest
+        freqs = np.fft.fftfreq(len(psd_data), d=1/RATE)
+
+        # Compute the power spectrogram on the mel scale
+        S = librosa.feature.melspectrogram(y=data, sr=RATE, n_fft=2048, hop_length=1024)
+
+        # Find the frequency bin with the maximum energy in each frame
+        max_bin = np.argmax(S, axis=0)
+
+        # Find the dominant frequency in each frame
+        dominant_freqs = freqs[max_bin]
+
+        # Take the median of the per-frame dominant frequencies as the overall dominant frequency
+        dominant_freq = np.median(dominant_freqs)
+
+        return dominant_freq
+
+    freq = get_dominant_freq(data)
+    avg_db, max_db = get_max_average_db(data)
+    print(int(freq), int(avg_db), int(max_db))
+
+    # Scale the echo gain by the dominant frequency, then mix in the delayed signal
+    temp_gain = freq / MAX_FREQ
+    output = data.astype(np.float32) + (freq / 2500) * buffer[:len(data)]
+    buffer = np.roll(buffer, len(data))
+    buffer[-len(data):] = data
+    return (np.clip(output, -32768, 32767).astype(np.int16).tobytes(), pyaudio.paContinue)
+
+
+p = pyaudio.PyAudio()
+stream = p.open(format=p.get_format_from_width(2),
+                channels=1 if sys.platform == 'darwin' else 2,
+                rate=RATE,
+                input=True,
+                output=True,
+                frames_per_buffer=CHUNK,
+                stream_callback=add_echo)
+
+print('* recording')
+
+stream.start_stream()
+
+while stream.is_active():
+    # Do other processing here if necessary
+    time.sleep(0.1)
+
+stream.stop_stream()
+stream.close()
+p.terminate()
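One detail in get_max_average_db: the STFT is squared into a power spectrogram, so librosa.power_to_db is the matching conversion; applying amplitude_to_db to a power spectrogram would double every dB value. A small check of the underlying identity 10*log10(A**2) == 20*log10(A), assuming librosa is installed; the random spectrogram is just a stand-in for np.abs(librosa.stft(...)):

import numpy as np
import librosa

# Random magnitude spectrogram, kept well away from the dB floor
rng = np.random.default_rng(0)
S_mag = rng.random((1025, 40)) + 0.1

db_from_power = librosa.power_to_db(S_mag**2, ref=np.max)
db_from_amplitude = librosa.amplitude_to_db(S_mag, ref=np.max)

# 10*log10(A**2) equals 20*log10(A), so the two agree
print(np.allclose(db_from_power, db_from_amplitude))  # True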
testForPort.py deleted 100644 → 0
-import numpy as np
-import pyaudio
-import time
-
-pa = pyaudio.PyAudio()
-delay_buffer = np.zeros((44100, 1), dtype=np.float32)
-
-def callback(in_data, frame_count, time_info, status):
-    global delay_buffer
-    audio_data = np.frombuffer(in_data, dtype=np.float32).reshape(1024, 1)
-    delayed_data = np.concatenate((delay_buffer, audio_data))
-    delay_buffer = delayed_data[frame_count:]
-    return (audio_data + 0.5 * delay_buffer).tobytes(), pyaudio.paContinue
-
-stream = pa.open(format=pyaudio.paFloat32,
-                 channels=1,
-                 rate=1024,
-                 input=True,
-                 output=True,
-                 frames_per_buffer=44100,
-                 stream_callback=callback)
-start = time.time()
-DURATION = 30
-# keep the stream running for a few seconds
-while stream.is_active() and (time.time() - start) < DURATION:
-    time.sleep(0.1)
-
-stream.stop()
-stream.close()
-pa.terminate()
\ No newline at end of file
visualize.py 0 → 100644
+import pyaudio
+import numpy as np
+import matplotlib.pyplot as plt
+import librosa
+import threading
+import sys
+
+# Route print through sys.stdout.write (newlines are added explicitly below)
+print = sys.stdout.write
+
+
+# Define constants for audio parameters
+FORMAT = pyaudio.paFloat32
+CHANNELS = 1
+RATE = 44100
+FRAMES_PER_BUFFER = 1024
+DELAY = 0.1
+GAIN = 0.5
+
+# Open an audio stream
+stream = pyaudio.PyAudio().open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True,
+                                frames_per_buffer=FRAMES_PER_BUFFER)
+
+
+sound = []
+def get_stream_data():
+    global sound
+    sound.append(stream.read(FRAMES_PER_BUFFER * 5, exception_on_overflow=False))
+
+get_stream_data()
+
+
+# Create buffer for delayed audio data
+buffer_size = int(RATE * DELAY)
+buffer = np.zeros(buffer_size, dtype=np.int16)
+
+def add_echo(in_data, frame_count, time_info, status_flags):
+    global buffer
+    data = np.frombuffer(in_data, dtype=np.int16)
+    output = data + GAIN * buffer[:len(data)]
+    buffer = np.roll(buffer, len(data))
+    buffer[-len(data):] = data
+    return (output.astype(np.int16).tobytes(), pyaudio.paContinue)
+
+
+def get_max_average_db():
+    global sound
+    data = sound[-1]
+    # Convert data to a numpy array
+    data_float = np.frombuffer(data, dtype=np.float32)
+
+    # Compute the power spectrogram of the data
+    S = librosa.stft(data_float, n_fft=2048, hop_length=512)
+    S_power = np.abs(S)**2
+
+    # Convert the power spectrogram to the dB scale
+    S_dB = librosa.power_to_db(S_power, ref=np.max)
+
+    # Calculate the average dB level
+    avg_dB = np.mean(S_dB)
+
+    print("Average dB: {:.2f}".format(avg_dB) + " " + "Max dB: {:.2f}".format(np.max(S_dB)) + "\n")
+
+
+def print_dominant_freq():
+    global sound
+    data = sound[-1]
+
+    # Convert data to a numpy array
+    data = np.frombuffer(data, dtype=np.float32)
+
+    # Compute the Fourier transform of the data
+    fft_data = np.fft.fft(data)
+
+    # Compute the power spectral density of the data
+    psd_data = np.abs(fft_data)**2
+
+    # Define the frequency range of interest
+    freqs = np.fft.fftfreq(len(psd_data), d=1/RATE)
+
+    # Compute the power spectrogram on the mel scale
+    S = librosa.feature.melspectrogram(y=data, sr=RATE, n_fft=2048, hop_length=1024, n_mels=512)
+
+    # Find the frequency bin with the maximum energy in each frame
+    max_bin = np.argmax(S, axis=0)
+
+    # Find the dominant frequency in each frame
+    dominant_freqs = freqs[max_bin]
+
+    # Take the median of the per-frame dominant frequencies as the overall dominant frequency
+    dominant_freq = np.median(dominant_freqs)
+
+    print("Dominant frequency: {:.2f} Hz\n".format(dominant_freq))
+
+
+threading.Thread(target=get_stream_data).start()
+
+while True:
+    get_data = threading.Thread(target=get_stream_data)
+    calc_data = threading.Thread(target=print_dominant_freq)
+    #get_decibel = threading.Thread(target=get_max_average_db)
+
+    get_data.start()
+    calc_data.start()
+    #get_decibel.start()
+
+    get_data.join()
+
+    sys.stdout.flush()
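The dominant-frequency estimate in visualize.py indexes the FFT frequency array with mel-band indices, which only loosely tracks physical frequency. A more direct estimate for a mono float32 frame, sketched here with a 440 Hz test tone as an illustrative assumption, uses the real FFT and its frequency grid:

import numpy as np

RATE = 44100

def dominant_frequency(samples, rate=RATE):
    """Return the frequency (Hz) of the bin with the most energy in a mono frame."""
    spectrum = np.abs(np.fft.rfft(samples)) ** 2
    freqs = np.fft.rfftfreq(len(samples), d=1.0 / rate)
    return freqs[np.argmax(spectrum[1:]) + 1]  # skip the DC bin

# Quick check with a 440 Hz test tone
t = np.arange(RATE) / RATE
tone = np.sin(2 * np.pi * 440 * t).astype(np.float32)
print(dominant_frequency(tone))  # ~440.0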
what_to_do 0 → 100644
@@ -1,6 +1,7 @@
 import sys
-
+import numpy as np
 import pyaudio
+import matplotlib.pyplot as plt
 
 RECORD_SECONDS = 5
 CHUNK = 1024
@@ -16,15 +17,54 @@ stream = p.open(format=p.get_format_from_width(2),
 
 print('* recording')
 
+# Initialize plot
+fig, ax = plt.subplots()
+x = np.arange(0, RECORD_SECONDS, CHUNK / RATE)
+line, = ax.plot(x, np.zeros(len(x)))
+
 def add_echo(data, output_stream):
     output_stream.write(data)
 
+# Initialize data arrays
+db_data = np.zeros(len(x))
+
 
 for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
-    add_echo(stream.read(CHUNK), stream)
+    byte_stream = stream.read(CHUNK)
+    data = np.frombuffer(byte_stream, dtype=np.int16)
+    fft_data = np.fft.fft(data)
+    # Frequency band of interest
+    freq_range = (200, 2000)  # 200 Hz to 2 kHz
+
+    # Extract that band from the frequency spectrum
+    freq_spectrum = fft_data[(freq_range[0] // (RATE // len(data))) : (freq_range[1] // (RATE // len(data)))]
+
+    # Compute the energy in the band
+    energy = np.sum(np.abs(freq_spectrum)**2)
+    # Compute the sound level in dB
+    db = 10 * np.log10(energy)
 
+    # Estimate the pitch (frequency with the most energy)
+    max_freq = (np.argmax(np.abs(freq_spectrum)) * RATE) / len(data)
+    print("freq : ", max_freq)
+    print("db : ", db)
+
+    # Add data to arrays
+    db_data = np.roll(db_data, -1)
+    db_data[-1] = db
+
+    # Update plot
+    line.set_ydata(db_data)
+    ax.relim()
+    ax.autoscale_view()
+
+    plt.draw()
+    plt.pause(0.001)
 
 print('* done')
 
 stream.close()
-p.terminate()
\ No newline at end of file
+p.terminate()
+
+# Show plot
+plt.show()
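On the band-energy arithmetic added in what_to_do: RATE // len(data) is roughly the FFT bin width in Hz (about 43 Hz for CHUNK = 1024 at 44.1 kHz), so the slice selects approximately the 200 Hz to 2 kHz bins. An equivalent sketch with an explicit frequency mask instead of index division; the 1 kHz test frame is an illustrative assumption:

import numpy as np

RATE = 44100
CHUNK = 1024

def band_db(frame, low=200, high=2000, rate=RATE):
    """Energy of a frequency band, in dB (relative units, no reference level)."""
    spectrum = np.fft.rfft(frame)
    freqs = np.fft.rfftfreq(len(frame), d=1.0 / rate)
    band = (freqs >= low) & (freqs <= high)
    energy = np.sum(np.abs(spectrum[band]) ** 2)
    return 10 * np.log10(energy + 1e-12)  # epsilon avoids log10(0) on silence

# Example on a synthetic 1 kHz frame of int16-scale samples
t = np.arange(CHUNK) / RATE
frame = (5000 * np.sin(2 * np.pi * 1000 * t)).astype(np.int16)
print("band level: {:.1f} dB".format(band_db(frame)))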