
CapsNet @ 7d884474
1 +Subproject commit 7d8844740c119ae66576be9510474a791240a745
1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 3,
6 + "metadata": {},
7 + "outputs": [],
8 + "source": [
9 + "import matplotlib.pyplot as plt\n",
10 + "from scipy.fftpack import fft\n",
11 + "from scipy.io import wavfile # get the api"
12 + ]
13 + },
14 + {
15 + "cell_type": "code",
16 + "execution_count": 4,
17 + "metadata": {
18 + "scrolled": false
19 + },
20 + "outputs": [
21 + {
22 + "ename": "TypeError",
23 + "evalue": "'numpy.int16' object is not iterable",
24 + "output_type": "error",
25 + "traceback": [
26 + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
27 + "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
28 + "\u001b[0;32m<ipython-input-4-c176e6e452f3>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0mfs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mwavfile\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'1.wav'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# load the data\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0ma\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mT\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;31m# this is a two channel soundtrack, I get the first track\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mb\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mele\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0;36m8.\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mele\u001b[0m \u001b[0;32min\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;31m# this is 8-bit track, b is now normalized on [-1,1)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4\u001b[0m \u001b[0mc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfft\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# calculate fourier transform (complex numbers list)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0md\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# you only need half of the fft list (real signal symmetry)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
29 + "\u001b[0;31mTypeError\u001b[0m: 'numpy.int16' object is not iterable"
30 + ]
31 + }
32 + ],
33 + "source": [
34 + "fs, data = wavfile.read('1.wav') # load the data\n",
35 + "a = data.T[0] # this is a two channel soundtrack, I get the first track\n",
36 + "b=[(ele/2**8.)*2-1 for ele in a] # this is 8-bit track, b is now normalized on [-1,1)\n",
37 + "c = fft(b) # calculate fourier transform (complex numbers list)\n",
38 + "d = int(len(c)/2) # you only need half of the fft list (real signal symmetry)\n",
39 + "plt.plot(abs(c[:(d-1)]),'r') \n",
40 + "plt.show()"
41 + ]
42 + },
43 + {
44 + "cell_type": "code",
45 + "execution_count": 16,
46 + "metadata": {},
47 + "outputs": [],
48 + "source": [
49 + "# need to be able to print out the wav file info \n",
50 + "# (channels: 1 if mono, 2 if stereo)\n",
51 + "\n"
52 + ]
53 + },
54 + {
55 + "cell_type": "code",
56 + "execution_count": null,
57 + "metadata": {},
58 + "outputs": [],
59 + "source": []
60 + }
61 + ],
62 + "metadata": {
63 + "kernelspec": {
64 + "display_name": "Python 3",
65 + "language": "python",
66 + "name": "python3"
67 + },
68 + "language_info": {
69 + "codemirror_mode": {
70 + "name": "ipython",
71 + "version": 3
72 + },
73 + "file_extension": ".py",
74 + "mimetype": "text/x-python",
75 + "name": "python",
76 + "nbconvert_exporter": "python",
77 + "pygments_lexer": "ipython3",
78 + "version": "3.6.5"
79 + }
80 + },
81 + "nbformat": 4,
82 + "nbformat_minor": 2
83 +}
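Note: the TypeError recorded in the notebook above comes from reading a mono file and then indexing `data.T[0]` as if it were stereo; for a 1-D int16 array that expression returns a scalar, which the list comprehension cannot iterate. A minimal corrected sketch, assuming '1.wav' is integer PCM (everything else follows the cell above):

import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft
from scipy.io import wavfile

fs, data = wavfile.read('1.wav')                # load the data
a = data if data.ndim == 1 else data.T[0]       # mono track, or first channel of a stereo file
b = a / np.iinfo(data.dtype).max                # normalize integer PCM to roughly [-1, 1]
c = fft(b)                                      # Fourier transform (complex spectrum)
d = len(c) // 2                                 # real signal is symmetric, keep half
plt.plot(np.abs(c[:d]), 'r')
plt.show()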
1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 2,
6 + "metadata": {},
7 + "outputs": [],
8 + "source": [
9 + "import math\n",
10 + "import wave\n",
11 + "import sys\n",
12 + "import struct\n"
13 + ]
14 + },
15 + {
16 + "cell_type": "code",
17 + "execution_count": 3,
18 + "metadata": {},
19 + "outputs": [
20 + {
21 + "ename": "ValueError",
22 + "evalue": "invalid literal for int() with base 10: '/run/user/1000/jupyter/kernel-6454a929-4509-4b51-949d-f1c910f7ce09.json'",
23 + "output_type": "error",
24 + "traceback": [
25 + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
26 + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
27 + "\u001b[0;32m<ipython-input-3-cc8beb2556b6>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0msample_rate\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetframerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mtotal_num_samps\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetnframes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mfft_length\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margv\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0mnum_fft\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mtotal_num_samps\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0mfft_length\u001b[0m \u001b[0;34m)\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0;36m2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
28 + "\u001b[0;31mValueError\u001b[0m: invalid literal for int() with base 10: '/run/user/1000/jupyter/kernel-6454a929-4509-4b51-949d-f1c910f7ce09.json'"
29 + ]
30 + }
31 + ],
32 + "source": [
33 + "# open the wave file\n",
34 + "fp = wave.open('birdsound.wav',\"rb\")\n",
35 + "sample_rate = fp.getframerate()\n",
36 + "total_num_samps = fp.getnframes()\n",
37 + "fft_length = int(sys.argv[2])\n",
38 + "num_fft = (total_num_samps / fft_length ) - 2"
39 + ]
40 + },
41 + {
42 + "cell_type": "code",
43 + "execution_count": null,
44 + "metadata": {},
45 + "outputs": [],
46 + "source": [
47 + "# create a temporary working array\n",
48 + "temp = zeros((num_fft,fft_length),Float)"
49 + ]
50 + },
51 + {
52 + "cell_type": "code",
53 + "execution_count": null,
54 + "metadata": {},
55 + "outputs": [],
56 + "source": [
57 + "# read the data in from the file\n",
58 + "for i in range(num_fft):\n",
59 + " tempb = fp.readframes(fft_length);\n",
60 + " temp[i,:] = array(struct.unpack(\"%dB\"%(fft_length), \\\n",
61 + " tempb),Float) - 128.0\n",
62 + "fp.close()"
63 + ]
64 + },
65 + {
66 + "cell_type": "code",
67 + "execution_count": null,
68 + "metadata": {},
69 + "outputs": [],
70 + "source": [
71 + "# window the data\n",
72 + "temp = temp * hamming(fft_length)\n",
73 + "\n",
74 + "# transform with the FFT and return the power\n",
75 + "freq_pwr = 10*log10(1e-20+abs(real_fft(temp,fft_length)))"
76 + ]
77 + },
78 + {
79 + "cell_type": "code",
80 + "execution_count": null,
81 + "metadata": {},
82 + "outputs": [],
83 + "source": [
84 + "# plot the results\n",
85 + "n_out_pts = (fft_length / 2) + 1\n",
86 + "y_axis = 0.5*float(sample_rate) / n_out_pts * \\\n",
87 + " arange(n_out_pts)\n",
88 + "x_axis = (total_num_samps / float(sample_rate)) / \\\n",
89 + " num_fft * arange(num_fft)\n",
90 + "setvar(\"X\",\"Time (sec)\")\n",
91 + "setvar(\"Y\",\"Frequency (Hertz)\")\n",
92 + "conshade(freq_pwr,x_axis,y_axis)\n",
93 + "disfin()"
94 + ]
95 + }
96 + ],
97 + "metadata": {
98 + "kernelspec": {
99 + "display_name": "Python 3",
100 + "language": "python",
101 + "name": "python3"
102 + },
103 + "language_info": {
104 + "codemirror_mode": {
105 + "name": "ipython",
106 + "version": 3
107 + },
108 + "file_extension": ".py",
109 + "mimetype": "text/x-python",
110 + "name": "python",
111 + "nbconvert_exporter": "python",
112 + "pygments_lexer": "ipython3",
113 + "version": "3.6.5"
114 + }
115 + },
116 + "nbformat": 4,
117 + "nbformat_minor": 2
118 +}
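Note: the ValueError above appears because inside a Jupyter kernel `sys.argv[2]` holds the kernel connection file rather than a command-line argument, and the later cells rely on old Numeric/DISLIN names (`zeros(..., Float)`, `real_fft`, `setvar`, `conshade`, `disfin`) that are never imported. A runnable sketch of the same pipeline, assuming 'birdsound.wav' is 8-bit unsigned mono (implied by the "%dB" unpack format), with a hard-coded fft_length and numpy/matplotlib standing in for the missing pieces:

import wave
import struct
import numpy as np
import matplotlib.pyplot as plt

fft_length = 512                                   # assumed window size (was sys.argv[2])
fp = wave.open('birdsound.wav', 'rb')
sample_rate = fp.getframerate()
total_num_samps = fp.getnframes()
num_fft = (total_num_samps // fft_length) - 2

# read the data from the file, frame block by frame block
temp = np.zeros((num_fft, fft_length), dtype=float)
for i in range(num_fft):
    tempb = fp.readframes(fft_length)
    temp[i, :] = np.array(struct.unpack("%dB" % fft_length, tempb), dtype=float) - 128.0
fp.close()

# window each block, transform with the FFT, convert to power in dB
temp = temp * np.hamming(fft_length)
freq_pwr = 10 * np.log10(1e-20 + np.abs(np.fft.rfft(temp, fft_length)))

# plot the results as a time/frequency map (replaces the conshade/disfin calls)
n_out_pts = fft_length // 2 + 1
x_axis = (total_num_samps / float(sample_rate)) / num_fft * np.arange(num_fft)
y_axis = 0.5 * float(sample_rate) / n_out_pts * np.arange(n_out_pts)
plt.pcolormesh(x_axis, y_axis, freq_pwr.T, shading='auto')
plt.xlabel("Time (sec)")
plt.ylabel("Frequency (Hertz)")
plt.show()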
1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 2,
6 + "metadata": {},
7 + "outputs": [],
8 + "source": [
9 + "import wave\n",
10 + "import pyaudio"
11 + ]
12 + },
13 + {
14 + "cell_type": "code",
15 + "execution_count": 3,
16 + "metadata": {},
17 + "outputs": [],
18 + "source": [
19 + "def play_file(fname):\n",
20 + "    # create the audio objects\n",
21 + "    wf = wave.open(fname, 'rb')  # wave object for the file we will play\n",
22 + "    p = pyaudio.PyAudio()  # PyAudio object\n",
23 + " chunk = 1024\n",
24 + " \n",
25 + "    # open an output stream via pyaudio\n",
26 + " stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n",
27 + " channels=wf.getnchannels(),\n",
28 + " rate=wf.getframerate(),\n",
29 + " output=True)\n",
30 + " \n",
31 + "    # read the first chunk of data from the wav file\n",
32 + " data = wf.readframes(chunk)\n",
33 + " \n",
34 + "    # keep going while there is data left to read\n",
35 + "    while data != b'':\n",
36 + "        stream.write(data)  # write the chunk to the output stream\n",
37 + "        data = wf.readframes(chunk)  # read the next chunk from the wav file\n",
38 + " \n",
39 + "    # close everything down\n",
40 + " stream.close()\n",
41 + " p.terminate()\n",
42 + " "
43 + ]
44 + },
45 + {
46 + "cell_type": "code",
47 + "execution_count": null,
48 + "metadata": {},
49 + "outputs": [],
50 + "source": [
51 + "play_file('output.wav')"
52 + ]
53 + },
54 + {
55 + "cell_type": "code",
56 + "execution_count": null,
57 + "metadata": {},
58 + "outputs": [],
59 + "source": []
60 + }
61 + ],
62 + "metadata": {
63 + "kernelspec": {
64 + "display_name": "Python 3",
65 + "language": "python",
66 + "name": "python3"
67 + },
68 + "language_info": {
69 + "codemirror_mode": {
70 + "name": "ipython",
71 + "version": 3
72 + },
73 + "file_extension": ".py",
74 + "mimetype": "text/x-python",
75 + "name": "python",
76 + "nbconvert_exporter": "python",
77 + "pygments_lexer": "ipython3",
78 + "version": "3.6.5"
79 + }
80 + },
81 + "nbformat": 4,
82 + "nbformat_minor": 2
83 +}
1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 1,
6 + "metadata": {},
7 + "outputs": [],
8 + "source": [
9 + "import numpy as np\n",
10 + "import pylab\n",
11 + "import matplotlib.pyplot as plt\n",
12 + "from scipy.io import wavfile\n",
13 + "import time\n",
14 + "import sys\n",
15 + "import seaborn as sns\n",
16 + "import pyaudio"
17 + ]
18 + },
19 + {
20 + "cell_type": "code",
21 + "execution_count": null,
22 + "metadata": {},
23 + "outputs": [],
24 + "source": [
25 + "\n",
26 + "i=0\n",
27 + "f,ax = plt.subplots(2)\n",
28 + "\n",
29 + "# Prepare the Plotting Environment with random starting values\n",
30 + "x = np.arange(10000)\n",
31 + "y = np.random.randn(10000)\n",
32 + "\n",
33 + "# Plot 0 is for raw audio data\n",
34 + "li, = ax[0].plot(x, y)\n",
35 + "ax[0].set_xlim(0,1000)\n",
36 + "ax[0].set_ylim(-5000,5000)\n",
37 + "ax[0].set_title(\"Raw Audio Signal\")\n",
38 + "# Plot 1 is for the FFT of the audio\n",
39 + "li2, = ax[1].plot(x, y)\n",
40 + "ax[1].set_xlim(0,5000)\n",
41 + "ax[1].set_ylim(-100,100)\n",
42 + "ax[1].set_title(\"Fast Fourier Transform\")\n",
43 + "# Show the plot, but without blocking updates\n",
44 + "plt.pause(0.01)\n",
45 + "plt.tight_layout()\n"
46 + ]
47 + },
48 + {
49 + "cell_type": "code",
50 + "execution_count": null,
51 + "metadata": {},
52 + "outputs": [],
53 + "source": [
54 + "\n",
55 + "FORMAT = pyaudio.paInt16 # We use 16bit format per sample\n",
56 + "CHANNELS = 1\n",
57 + "RATE = 44100\n",
58 + "CHUNK = 1024 # 1024 frames of data read from the buffer at a time\n",
59 + "RECORD_SECONDS = 0.1\n",
60 + "WAVE_OUTPUT_FILENAME = \"file.wav\"\n",
61 + "\n",
62 + "audio = pyaudio.PyAudio()\n",
63 + "\n",
64 + "# start Recording\n",
65 + "stream = audio.open(format=FORMAT,\n",
66 + " channels=CHANNELS,\n",
67 + " rate=RATE,\n",
68 + " input=True)#,\n",
69 + " #frames_per_buffer=CHUNK)\n",
70 + "\n",
71 + "global keep_going\n",
72 + "keep_going = True\n",
73 + "\n",
74 + "def plot_data(in_data):\n",
75 + "    # convert the raw bytes into an int16 numpy array\n",
76 + "    audio_data = np.frombuffer(in_data, dtype=np.int16)\n",
77 + " # Fast Fourier Transform, 10*log10(abs) is to scale it to dB\n",
78 + " # and make sure it's not imaginary\n",
79 + " dfft = 10.*np.log10(abs(np.fft.rfft(audio_data)))\n",
80 + "\n",
81 + " # Force the new data into the plot, but without redrawing axes.\n",
82 + " # If uses plt.draw(), axes are re-drawn every time\n",
83 + " #print audio_data[0:10]\n",
84 + " #print dfft[0:10]\n",
85 + " #print\n",
86 + " li.set_xdata(np.arange(len(audio_data)))\n",
87 + " li.set_ydata(audio_data)\n",
88 + " li2.set_xdata(np.arange(len(dfft))*10.)\n",
89 + " li2.set_ydata(dfft)\n",
90 + "\n",
91 + " # Show the updated plot, but without blocking\n",
92 + " plt.pause(0.01)\n",
93 + " if keep_going:\n",
94 + " return True\n",
95 + " else:\n",
96 + " return False\n"
97 + ]
98 + },
99 + {
100 + "cell_type": "code",
101 + "execution_count": null,
102 + "metadata": {},
103 + "outputs": [],
104 + "source": [
105 + "\n",
106 + "# Open the connection and start streaming the data\n",
107 + "stream.start_stream()\n",
108 + "print (\"\\n+---------------------------------+\")\n",
109 + "print (\"| Press Ctrl+C to Break Recording |\")\n",
110 + "print (\"+---------------------------------+\\n\")\n",
111 + "\n",
112 + "# Loop so the program doesn't end while we keep pulling\n",
113 + "# new data from the stream\n",
114 + "while keep_going:\n",
115 + " try:\n",
116 + " plot_data(stream.read(CHUNK))\n",
117 + " except KeyboardInterrupt:\n",
118 + " keep_going=False\n",
119 + " except:\n",
120 + " pass\n",
121 + "\n",
122 + "# Close up shop (currently not used because KeyboardInterrupt\n",
123 + "# is the only way to close)\n",
124 + "stream.stop_stream()\n",
125 + "stream.close()\n",
126 + "\n",
127 + "audio.terminate()\n"
128 + ]
129 + },
130 + {
131 + "cell_type": "code",
132 + "execution_count": null,
133 + "metadata": {},
134 + "outputs": [],
135 + "source": []
136 + }
137 + ],
138 + "metadata": {
139 + "kernelspec": {
140 + "display_name": "Python 3",
141 + "language": "python",
142 + "name": "python3"
143 + },
144 + "language_info": {
145 + "codemirror_mode": {
146 + "name": "ipython",
147 + "version": 3
148 + },
149 + "file_extension": ".py",
150 + "mimetype": "text/x-python",
151 + "name": "python",
152 + "nbconvert_exporter": "python",
153 + "pygments_lexer": "ipython3",
154 + "version": "3.6.5"
155 + }
156 + },
157 + "nbformat": 4,
158 + "nbformat_minor": 2
159 +}
1 +{
2 + "cells": [
3 + {
4 + "cell_type": "markdown",
5 + "metadata": {},
6 + "source": [
7 + "Things to TEST\n",
8 + "<Inputs>\n",
9 + "1. A mel spectrogram with everything below a certain dB power removed <- ah... the original dataset picks up even fairly quiet bird sounds\n",
10 + "2. A mel spectrogram with 80 mel filters & the axis=1 mean subtracted\n",
11 + "3. Cut off the frequency bands below the bird-call range\n",
12 + "5. Scan kernel by kernel and pull out only the parts that stand out (there is probably a method that compares just the local neighborhood)\n",
13 + "6. Look at the version without the mel filter, e.g. when cutting off the bands below the bird-call range.. check the unfiltered version\n",
14 + "\n",
15 + "<Models>\n",
16 + "1. A solid plain CNN model. Let's see why the first model does better\n",
17 + "3. C+RNN <- did the RNN part actually work properly?\n",
18 + "4. Capsule network\n",
19 + "\n",
20 + "<Additional>\n",
21 + "1. Labeling: label as 1 only the calls that are clearly audible and clearly visible, label the faint ones as 0\n",
22 + "   -> show that the hit rate is high on the field-test samples. Also show the ones the machine called a bird but that were labeled 0\n",
23 + "2. Why does the field test do worse than the original data? -> either that chirp is rare in the dataset or the model is weak.. (let the machine classify it first and then decide?)\n",
24 + "3. In that case it might not be a bad idea to focus on the starling call, train on that pattern, and search for it specifically\n",
25 + "4. Also need to check what differs from the iPhone recording and whether there is actually a difference. "
26 + ]
27 + },
28 + {
29 + "cell_type": "markdown",
30 + "metadata": {},
31 + "source": [
32 + "1. CRNN - go over the RNN code again and run it\n",
33 + "2. CNN - run the model from paper 1\n",
34 + "3. Run the capsule network\n",
35 + "\n",
36 + "#### Built the mel spectrogram with both scipy and librosa; librosa was better (see the sketch after this notebook)."
37 + ]
38 + }
39 + ],
40 + "metadata": {
41 + "kernelspec": {
42 + "display_name": "Python 3",
43 + "language": "python",
44 + "name": "python3"
45 + },
46 + "language_info": {
47 + "codemirror_mode": {
48 + "name": "ipython",
49 + "version": 3
50 + },
51 + "file_extension": ".py",
52 + "mimetype": "text/x-python",
53 + "name": "python",
54 + "nbconvert_exporter": "python",
55 + "pygments_lexer": "ipython3",
56 + "version": "3.6.5"
57 + }
58 + },
59 + "nbformat": 4,
60 + "nbformat_minor": 2
61 +}
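As referenced in the notebook above, a minimal sketch of the 80-band mel spectrogram with the axis=1 mean subtracted, using librosa; the file name and STFT parameters (n_fft, hop_length) are assumptions:

import numpy as np
import librosa

y, sr = librosa.load('birdsound.wav', sr=None)            # keep the native sample rate
S = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024,
                                   hop_length=512, n_mels=80)
S_db = librosa.power_to_db(S, ref=np.max)                 # log-power (dB) scale
S_db -= S_db.mean(axis=1, keepdims=True)                  # subtract the per-band (axis=1) mean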
1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 2,
6 + "metadata": {},
7 + "outputs": [],
8 + "source": [
9 + "import pyaudio\n",
10 + "import wave"
11 + ]
12 + },
13 + {
14 + "cell_type": "code",
15 + "execution_count": 6,
16 + "metadata": {},
17 + "outputs": [
18 + {
19 + "name": "stdout",
20 + "output_type": "stream",
21 + "text": [
22 + "* recording\n",
23 + "* done recording\n"
24 + ]
25 + }
26 + ],
27 + "source": [
28 + "CHUNK = 1024\n",
29 + "FORMAT = pyaudio.paInt16\n",
30 + "CHANNELS = 1\n",
31 + "RATE = 25600\n",
32 + "RECORD_SECONDS = 10\n",
33 + "WAVE_OUTPUT_FILENAME = \"test.wav\"\n",
34 + "\n",
35 + "p = pyaudio.PyAudio()\n",
36 + "\n",
37 + "stream = p.open(format=FORMAT,\n",
38 + " channels=CHANNELS,\n",
39 + " rate=RATE,\n",
40 + " input=True,\n",
41 + " frames_per_buffer=CHUNK)\n",
42 + "\n",
43 + "print(\"* recording\")\n",
44 + "\n",
45 + "frames = []\n",
46 + "\n",
47 + "for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n",
48 + " data = stream.read(CHUNK)\n",
49 + " frames.append(data)\n",
50 + "\n",
51 + "print(\"* done recording\")\n",
52 + "\n",
53 + "stream.stop_stream()\n",
54 + "stream.close()\n",
55 + "p.terminate()\n",
56 + "\n",
57 + "wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n",
58 + "wf.setnchannels(CHANNELS)\n",
59 + "wf.setsampwidth(p.get_sample_size(FORMAT))\n",
60 + "wf.setframerate(RATE)\n",
61 + "wf.writeframes(b''.join(frames))\n",
62 + "wf.close()"
63 + ]
64 + },
65 + {
66 + "cell_type": "code",
67 + "execution_count": null,
68 + "metadata": {},
69 + "outputs": [],
70 + "source": []
71 + },
72 + {
73 + "cell_type": "code",
74 + "execution_count": null,
75 + "metadata": {},
76 + "outputs": [],
77 + "source": []
78 + }
79 + ],
80 + "metadata": {
81 + "kernelspec": {
82 + "display_name": "Python 3",
83 + "language": "python",
84 + "name": "python3"
85 + },
86 + "language_info": {
87 + "codemirror_mode": {
88 + "name": "ipython",
89 + "version": 3
90 + },
91 + "file_extension": ".py",
92 + "mimetype": "text/x-python",
93 + "name": "python",
94 + "nbconvert_exporter": "python",
95 + "pygments_lexer": "ipython3",
96 + "version": "3.6.5"
97 + }
98 + },
99 + "nbformat": 4,
100 + "nbformat_minor": 2
101 +}
1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 2,
6 + "metadata": {},
7 + "outputs": [],
8 + "source": [
9 + "import pydub"
10 + ]
11 + },
12 + {
13 + "cell_type": "code",
14 + "execution_count": 4,
15 + "metadata": {},
16 + "outputs": [
17 + {
18 + "name": "stdout",
19 + "output_type": "stream",
20 + "text": [
21 + "done\n"
22 + ]
23 + }
24 + ],
25 + "source": [
26 + "from pydub import AudioSegment\n",
27 + "\n",
28 + "for i in range(0,45):\n",
29 + " t1 = i * 10000 #Works in milliseconds\n",
30 + " t2 = (i+1) * 10000\n",
31 + " \n",
32 + " newAudio = AudioSegment.from_wav(\"./New/IMG_3867.wav\")\n",
33 + " newAudio = newAudio[t1:t2]\n",
34 + " newAudio.export('./field/IMG_3867%d.wav'%(i), format=\"wav\")\n",
35 + "\n",
36 + "print('done')"
37 + ]
38 + },
39 + {
40 + "cell_type": "code",
41 + "execution_count": null,
42 + "metadata": {},
43 + "outputs": [],
44 + "source": []
45 + }
46 + ],
47 + "metadata": {
48 + "kernelspec": {
49 + "display_name": "Python 3",
50 + "language": "python",
51 + "name": "python3"
52 + },
53 + "language_info": {
54 + "codemirror_mode": {
55 + "name": "ipython",
56 + "version": 3
57 + },
58 + "file_extension": ".py",
59 + "mimetype": "text/x-python",
60 + "name": "python",
61 + "nbconvert_exporter": "python",
62 + "pygments_lexer": "ipython3",
63 + "version": "3.6.5"
64 + }
65 + },
66 + "nbformat": 4,
67 + "nbformat_minor": 2
68 +}
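A small variant of the splitting cell above (a sketch, not the author's code): loading the recording once and slicing it avoids re-reading the whole wav file on every iteration; pydub slices AudioSegment objects by milliseconds.

from pydub import AudioSegment

audio = AudioSegment.from_wav("./New/IMG_3867.wav")   # load the recording once
segment_ms = 10 * 1000                                # 10-second segments, in milliseconds
for i in range(0, 45):
    chunk = audio[i * segment_ms:(i + 1) * segment_ms]
    chunk.export('./field/IMG_3867%d.wav' % i, format="wav")
print('done')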