JeungEunLee

data augmentation and size/rotate experiment: because the output looks folded

......@@ -33,12 +33,27 @@
" image_channels = 3\n",
" self.data_files = data_files\n",
" self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n",
" \n",
" def get_image(iself,image_path, width, height, mode):\n",
" image = Image.open(image_path)\n",
" image = image.resize((width,height))\n",
" return np.array(image)\n",
"\n",
"\n",
" def get_batch(self,image_files, width, height, mode):\n",
" data_batch = np.array(\n",
" [self.get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n",
" \n",
" # Make sure the images are in 4 dimensions\n",
" if len(data_batch.shape) < 4:\n",
" data_batch = data_batch.reshape(data_batch.shape + (1,))\n",
" return data_batch\n",
"\n",
" def get_batches(self, batch_size):\n",
" IMAGE_MAX_VALUE = 255\n",
" current_index = 0\n",
" while current_index + batch_size <= self.shape[0]:\n",
" data_batch = get_batch(\n",
" data_batch = self.get_batch(\n",
" self.data_files[current_index:current_index + batch_size],\n",
" self.shape[1],self.shape[2],\n",
" self.image_mode)\n",
......@@ -219,10 +234,14 @@
" saver = tf.train.Saver()\n",
" sess.run(tf.global_variables_initializer())\n",
" \n",
" # continue training\n",
" # continue training\n",
" save_path = saver.save(sess, \"/tmp/model.ckpt\")\n",
" ckpt = tf.train.latest_checkpoint('./model/')\n",
" saver.restore(sess, save_path)\n",
" \n",
" #newsaver = tf.train.import_meta_graph('./model/70.meta')\n",
" #newsaver.restore(sess, tf.train.latest_checkpoint('./model/'))\n",
" \n",
" coord = tf.train.Coordinator()\n",
" threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n",
"\n",
......@@ -280,36 +299,388 @@
"name": "stdout",
"output_type": "stream",
"text": [
"140\n",
"5004\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"INFO:tensorflow:Restoring parameters from /tmp/model.ckpt\n",
"Epoch 1/200 Step 10... Discriminator Loss: 0.7986... Generator Loss: 2.7782\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 2/200 Step 20... Discriminator Loss: 0.7019... Generator Loss: 1.2096\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 2/200 Step 30... Discriminator Loss: 0.6407... Generator Loss: 1.7675\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 3/200 Step 40... Discriminator Loss: 0.9732... Generator Loss: 0.9018\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 3/200 Step 50... Discriminator Loss: 1.2455... Generator Loss: 2.2003\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 4/200 Step 60... Discriminator Loss: 0.9650... Generator Loss: 1.1981\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 4/200 Step 70... Discriminator Loss: 0.9376... Generator Loss: 1.6022\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 5/200 Step 80... Discriminator Loss: 0.9873... Generator Loss: 0.9408\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 5/200 Step 90... Discriminator Loss: 1.1370... Generator Loss: 2.2449\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 6/200 Step 100... Discriminator Loss: 0.9307... Generator Loss: 1.1019\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 6/200 Step 110... Discriminator Loss: 0.9045... Generator Loss: 1.3023\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 7/200 Step 120... Discriminator Loss: 1.4306... Generator Loss: 3.0811\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 7/200 Step 130... Discriminator Loss: 0.8306... Generator Loss: 1.4418\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 8/200 Step 140... Discriminator Loss: 1.0130... Generator Loss: 0.9772\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 8/200 Step 150... Discriminator Loss: 1.1253... Generator Loss: 2.7651\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 9/200 Step 160... Discriminator Loss: 1.2028... Generator Loss: 0.5614\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 9/200 Step 170... Discriminator Loss: 1.1864... Generator Loss: 0.6131\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 10/200 Step 180... Discriminator Loss: 0.8613... Generator Loss: 1.1399\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 10/200 Step 190... Discriminator Loss: 0.7570... Generator Loss: 1.9568\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 11/200 Step 200... Discriminator Loss: 0.8872... Generator Loss: 1.3420\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 12/200 Step 210... Discriminator Loss: 0.7758... Generator Loss: 1.3705\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 12/200 Step 220... Discriminator Loss: 0.9375... Generator Loss: 2.3697\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 13/200 Step 230... Discriminator Loss: 1.0274... Generator Loss: 2.6057\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 13/200 Step 240... Discriminator Loss: 0.8219... Generator Loss: 1.2095\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 14/200 Step 250... Discriminator Loss: 0.8607... Generator Loss: 1.8890\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 14/200 Step 260... Discriminator Loss: 0.8661... Generator Loss: 1.4806\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 15/200 Step 270... Discriminator Loss: 0.8005... Generator Loss: 1.6766\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 15/200 Step 280... Discriminator Loss: 0.8658... Generator Loss: 1.6609\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 16/200 Step 290... Discriminator Loss: 1.3357... Generator Loss: 0.5010\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 16/200 Step 300... Discriminator Loss: 0.8518... Generator Loss: 1.4408\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 17/200 Step 310... Discriminator Loss: 0.9052... Generator Loss: 1.2558\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 17/200 Step 320... Discriminator Loss: 0.9011... Generator Loss: 1.2468\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 18/200 Step 330... Discriminator Loss: 0.9880... Generator Loss: 0.8800\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 18/200 Step 340... Discriminator Loss: 0.9066... Generator Loss: 2.0460\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 19/200 Step 350... Discriminator Loss: 0.9169... Generator Loss: 1.7369\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 19/200 Step 360... Discriminator Loss: 0.9111... Generator Loss: 1.5251\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 20/200 Step 370... Discriminator Loss: 0.9466... Generator Loss: 1.0476\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 20/200 Step 380... Discriminator Loss: 1.0600... Generator Loss: 1.6264\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 21/200 Step 390... Discriminator Loss: 1.1503... Generator Loss: 0.9095\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 22/200 Step 400... Discriminator Loss: 1.1989... Generator Loss: 1.2204\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 22/200 Step 410... Discriminator Loss: 1.1530... Generator Loss: 0.8920\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 23/200 Step 420... Discriminator Loss: 1.2206... Generator Loss: 0.8665\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 23/200 Step 430... Discriminator Loss: 1.1357... Generator Loss: 1.0771\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 24/200 Step 440... Discriminator Loss: 1.5018... Generator Loss: 0.4140\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 24/200 Step 450... Discriminator Loss: 1.1407... Generator Loss: 0.9182\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 25/200 Step 460... Discriminator Loss: 1.1208... Generator Loss: 1.0497\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 25/200 Step 470... Discriminator Loss: 1.2283... Generator Loss: 1.3409\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 26/200 Step 480... Discriminator Loss: 1.1401... Generator Loss: 0.8807\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 26/200 Step 490... Discriminator Loss: 1.1839... Generator Loss: 0.7198\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 27/200 Step 500... Discriminator Loss: 1.5919... Generator Loss: 0.3560\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 27/200 Step 510... Discriminator Loss: 1.2166... Generator Loss: 1.4234\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 28/200 Step 520... Discriminator Loss: 1.1838... Generator Loss: 1.2357\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 28/200 Step 530... Discriminator Loss: 1.2062... Generator Loss: 1.4508\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 29/200 Step 540... Discriminator Loss: 1.2600... Generator Loss: 1.5470\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 29/200 Step 550... Discriminator Loss: 1.1592... Generator Loss: 0.9399\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 30/200 Step 560... Discriminator Loss: 1.1941... Generator Loss: 1.0776\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 30/200 Step 570... Discriminator Loss: 1.5479... Generator Loss: 2.1296\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 31/200 Step 580... Discriminator Loss: 1.3233... Generator Loss: 0.8222\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 32/200 Step 590... Discriminator Loss: 1.1821... Generator Loss: 0.9809\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"INFO:tensorflow:Restoring parameters from /tmp/model.ckpt\n",
"Epoch 32/200 Step 600... Discriminator Loss: 1.1763... Generator Loss: 0.7344\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 33/200 Step 610... Discriminator Loss: 1.1730... Generator Loss: 1.3747\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 33/200 Step 620... Discriminator Loss: 1.5791... Generator Loss: 0.3566\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 34/200 Step 630... Discriminator Loss: 1.4445... Generator Loss: 0.4481\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 34/200 Step 640... Discriminator Loss: 1.1244... Generator Loss: 1.1338\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 35/200 Step 650... Discriminator Loss: 1.1750... Generator Loss: 0.9281\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 35/200 Step 660... Discriminator Loss: 1.2072... Generator Loss: 1.1870\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 36/200 Step 670... Discriminator Loss: 1.2960... Generator Loss: 0.5793\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 36/200 Step 680... Discriminator Loss: 1.1635... Generator Loss: 1.0436\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n"
]
},
{
"ename": "FileExistsError",
"evalue": "[Errno 17] File exists: 'output'",
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mFileExistsError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-10-3cf64f8b526a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mceleba_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m(\u001b[0m \u001b[0mglob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'./motionpatch/*.png'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbeta1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_batches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimage_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-8-4eafe8fdaf6d>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode, print_every, show_every)\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0mthreads\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstart_queue_runners\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcoord\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcoord\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;31m#sess.run(tf.global_variables_initializer())\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 27\u001b[0;31m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmkdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 28\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mepoch_i\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch_count\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mbatch_images\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mget_batches\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mFileExistsError\u001b[0m: [Errno 17] File exists: 'output'"
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-10-bbe3447e21dd>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mceleba_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m(\u001b[0m \u001b[0mglob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'./smallone/*.png'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbeta1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_batches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimage_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-8-2e8656e87584>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode, print_every, show_every)\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[0;31m# Run optimizers\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md_train_opt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0minput_real\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbatch_images\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_z\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbatch_z\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 43\u001b[0;31m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg_train_opt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0minput_z\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbatch_z\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 44\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0msteps\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mprint_every\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 875\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 876\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 877\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 878\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 879\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1099\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1100\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1101\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1102\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1270\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1271\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1272\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1273\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1274\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1276\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1277\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1278\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1279\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1280\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1261\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1263\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1264\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1265\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1348\u001b[0m return tf_session.TF_SessionRun_wrapper(\n\u001b[1;32m 1349\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1350\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1351\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1352\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_call_tf_sessionprun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"batch_size = 50\n",
"batch_size = 256\n",
"z_dim = 100\n",
"learning_rate = 0.00025\n",
"beta1 = 0.45\n",
"\n",
"epochs = 500\n",
"print(len(glob('./motionpatch/*.png')))\n",
"celeba_dataset = Dataset( glob('./motionpatch/*.png'))\n",
"epochs = 200\n",
"print(len(glob('./smallone/*.png')))\n",
"celeba_dataset = Dataset( glob('./smallone/*.png'))\n",
"with tf.Graph().as_default():\n",
" train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)"
]
......
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"from glob import glob\n",
"import os \n",
"\n",
"# Resize every motion-patch PNG to 25x25 and write it under ./smallone/,\n",
"# numbering the outputs 1.png, 2.png, ... in glob order.\n",
"motionpatch_location = './motionpatch/*.png'\n",
"output_location = './smallone/'\n",
"count = 0\n",
"for f in glob(motionpatch_location):\n",
"    count += 1\n",
"    image = cv2.imread(f)\n",
"    # cv2.imread returns None on unreadable files; skip them instead of\n",
"    # crashing inside cv2.resize with an opaque assertion.\n",
"    if image is None:\n",
"        continue\n",
"    small = cv2.resize(image, dsize=(25, 25))\n",
"    # os.path.join takes separate path components; the original passed one\n",
"    # pre-concatenated string, making the call a no-op.\n",
"    dst = os.path.join(output_location, str(count) + '.png')\n",
"    cv2.imwrite(dst, small)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
......@@ -34,7 +34,7 @@
" self.data_files = data_files\n",
" self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n",
" \n",
" def get_image(iself,mage_path, width, height, mode):\n",
" def get_image(iself,image_path, width, height, mode):\n",
" image = Image.open(image_path)\n",
" image = image.resize((width,height))\n",
" return np.array(image)\n",
......@@ -234,10 +234,14 @@
" saver = tf.train.Saver()\n",
" sess.run(tf.global_variables_initializer())\n",
" \n",
" # continue training\n",
" # continue training\n",
" save_path = saver.save(sess, \"/tmp/model.ckpt\")\n",
" ckpt = tf.train.latest_checkpoint('./model/')\n",
" saver.restore(sess, save_path)\n",
" \n",
" #newsaver = tf.train.import_meta_graph('./model/70.meta')\n",
" #newsaver.restore(sess, tf.train.latest_checkpoint('./model/'))\n",
" \n",
" coord = tf.train.Coordinator()\n",
" threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n",
"\n",
......@@ -286,7 +290,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 10,
"metadata": {
"scrolled": true
},
......@@ -295,40 +299,388 @@
"name": "stdout",
"output_type": "stream",
"text": [
"140\n",
"5004\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"INFO:tensorflow:Restoring parameters from /tmp/model.ckpt\n",
"Epoch 1/200 Step 10... Discriminator Loss: 0.7986... Generator Loss: 2.7782\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 2/200 Step 20... Discriminator Loss: 0.7019... Generator Loss: 1.2096\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 2/200 Step 30... Discriminator Loss: 0.6407... Generator Loss: 1.7675\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 3/200 Step 40... Discriminator Loss: 0.9732... Generator Loss: 0.9018\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 3/200 Step 50... Discriminator Loss: 1.2455... Generator Loss: 2.2003\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 4/200 Step 60... Discriminator Loss: 0.9650... Generator Loss: 1.1981\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 4/200 Step 70... Discriminator Loss: 0.9376... Generator Loss: 1.6022\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 5/200 Step 80... Discriminator Loss: 0.9873... Generator Loss: 0.9408\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 5/200 Step 90... Discriminator Loss: 1.1370... Generator Loss: 2.2449\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 6/200 Step 100... Discriminator Loss: 0.9307... Generator Loss: 1.1019\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 6/200 Step 110... Discriminator Loss: 0.9045... Generator Loss: 1.3023\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 7/200 Step 120... Discriminator Loss: 1.4306... Generator Loss: 3.0811\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 7/200 Step 130... Discriminator Loss: 0.8306... Generator Loss: 1.4418\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 8/200 Step 140... Discriminator Loss: 1.0130... Generator Loss: 0.9772\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 8/200 Step 150... Discriminator Loss: 1.1253... Generator Loss: 2.7651\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 9/200 Step 160... Discriminator Loss: 1.2028... Generator Loss: 0.5614\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 9/200 Step 170... Discriminator Loss: 1.1864... Generator Loss: 0.6131\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 10/200 Step 180... Discriminator Loss: 0.8613... Generator Loss: 1.1399\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 10/200 Step 190... Discriminator Loss: 0.7570... Generator Loss: 1.9568\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 11/200 Step 200... Discriminator Loss: 0.8872... Generator Loss: 1.3420\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 12/200 Step 210... Discriminator Loss: 0.7758... Generator Loss: 1.3705\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 12/200 Step 220... Discriminator Loss: 0.9375... Generator Loss: 2.3697\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 13/200 Step 230... Discriminator Loss: 1.0274... Generator Loss: 2.6057\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 13/200 Step 240... Discriminator Loss: 0.8219... Generator Loss: 1.2095\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 14/200 Step 250... Discriminator Loss: 0.8607... Generator Loss: 1.8890\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 14/200 Step 260... Discriminator Loss: 0.8661... Generator Loss: 1.4806\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 15/200 Step 270... Discriminator Loss: 0.8005... Generator Loss: 1.6766\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 15/200 Step 280... Discriminator Loss: 0.8658... Generator Loss: 1.6609\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 16/200 Step 290... Discriminator Loss: 1.3357... Generator Loss: 0.5010\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 16/200 Step 300... Discriminator Loss: 0.8518... Generator Loss: 1.4408\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 17/200 Step 310... Discriminator Loss: 0.9052... Generator Loss: 1.2558\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 17/200 Step 320... Discriminator Loss: 0.9011... Generator Loss: 1.2468\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 18/200 Step 330... Discriminator Loss: 0.9880... Generator Loss: 0.8800\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 18/200 Step 340... Discriminator Loss: 0.9066... Generator Loss: 2.0460\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 19/200 Step 350... Discriminator Loss: 0.9169... Generator Loss: 1.7369\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 19/200 Step 360... Discriminator Loss: 0.9111... Generator Loss: 1.5251\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 20/200 Step 370... Discriminator Loss: 0.9466... Generator Loss: 1.0476\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 20/200 Step 380... Discriminator Loss: 1.0600... Generator Loss: 1.6264\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 21/200 Step 390... Discriminator Loss: 1.1503... Generator Loss: 0.9095\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 22/200 Step 400... Discriminator Loss: 1.1989... Generator Loss: 1.2204\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 22/200 Step 410... Discriminator Loss: 1.1530... Generator Loss: 0.8920\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 23/200 Step 420... Discriminator Loss: 1.2206... Generator Loss: 0.8665\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 23/200 Step 430... Discriminator Loss: 1.1357... Generator Loss: 1.0771\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 24/200 Step 440... Discriminator Loss: 1.5018... Generator Loss: 0.4140\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 24/200 Step 450... Discriminator Loss: 1.1407... Generator Loss: 0.9182\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 25/200 Step 460... Discriminator Loss: 1.1208... Generator Loss: 1.0497\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 25/200 Step 470... Discriminator Loss: 1.2283... Generator Loss: 1.3409\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 26/200 Step 480... Discriminator Loss: 1.1401... Generator Loss: 0.8807\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 26/200 Step 490... Discriminator Loss: 1.1839... Generator Loss: 0.7198\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 27/200 Step 500... Discriminator Loss: 1.5919... Generator Loss: 0.3560\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 27/200 Step 510... Discriminator Loss: 1.2166... Generator Loss: 1.4234\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 28/200 Step 520... Discriminator Loss: 1.1838... Generator Loss: 1.2357\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 28/200 Step 530... Discriminator Loss: 1.2062... Generator Loss: 1.4508\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 29/200 Step 540... Discriminator Loss: 1.2600... Generator Loss: 1.5470\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 29/200 Step 550... Discriminator Loss: 1.1592... Generator Loss: 0.9399\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 30/200 Step 560... Discriminator Loss: 1.1941... Generator Loss: 1.0776\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 30/200 Step 570... Discriminator Loss: 1.5479... Generator Loss: 2.1296\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 31/200 Step 580... Discriminator Loss: 1.3233... Generator Loss: 0.8222\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 32/200 Step 590... Discriminator Loss: 1.1821... Generator Loss: 0.9809\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"INFO:tensorflow:Restoring parameters from /tmp/model.ckpt\n"
"Epoch 32/200 Step 600... Discriminator Loss: 1.1763... Generator Loss: 0.7344\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 33/200 Step 610... Discriminator Loss: 1.1730... Generator Loss: 1.3747\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 33/200 Step 620... Discriminator Loss: 1.5791... Generator Loss: 0.3566\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 34/200 Step 630... Discriminator Loss: 1.4445... Generator Loss: 0.4481\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 34/200 Step 640... Discriminator Loss: 1.1244... Generator Loss: 1.1338\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 35/200 Step 650... Discriminator Loss: 1.1750... Generator Loss: 0.9281\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 35/200 Step 660... Discriminator Loss: 1.2072... Generator Loss: 1.1870\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 36/200 Step 670... Discriminator Loss: 1.2960... Generator Loss: 0.5793\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"Epoch 36/200 Step 680... Discriminator Loss: 1.1635... Generator Loss: 1.0436\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n"
]
},
{
"ename": "NameError",
"evalue": "name 'image_path' is not defined",
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-9-3cf64f8b526a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mceleba_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m(\u001b[0m \u001b[0mglob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'./motionpatch/*.png'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbeta1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_batches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimage_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-8-14a3faf19639>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode, print_every, show_every)\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmkdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mepoch_i\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch_count\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 29\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mbatch_images\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mget_batches\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 30\u001b[0m \u001b[0;31m# Train Model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0msteps\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36mget_batches\u001b[0;34m(self, batch_size)\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata_files\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcurrent_index\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mcurrent_index\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 32\u001b[0;31m self.image_mode)\n\u001b[0m\u001b[1;32m 33\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0mcurrent_index\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36mget_batch\u001b[0;34m(self, image_files, width, height, mode)\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimage_files\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m data_batch = np.array(\n\u001b[0;32m---> 18\u001b[0;31m [self.get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n\u001b[0m\u001b[1;32m 19\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;31m# Make sure the images are in 4 dimensions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimage_files\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m data_batch = np.array(\n\u001b[0;32m---> 18\u001b[0;31m [self.get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n\u001b[0m\u001b[1;32m 19\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;31m# Make sure the images are in 4 dimensions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36mget_image\u001b[0;34m(iself, mage_path, width, height, mode)\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_image\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmage_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mimage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mImage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0mimage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mimage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mheight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'image_path' is not defined"
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-10-bbe3447e21dd>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mceleba_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m(\u001b[0m \u001b[0mglob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'./smallone/*.png'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbeta1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_batches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimage_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-8-2e8656e87584>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode, print_every, show_every)\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[0;31m# Run optimizers\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md_train_opt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0minput_real\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbatch_images\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_z\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbatch_z\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 43\u001b[0;31m \u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg_train_opt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0minput_z\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbatch_z\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 44\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0msteps\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0mprint_every\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 875\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 876\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 877\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 878\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 879\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1099\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1100\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1101\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1102\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1270\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1271\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1272\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1273\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1274\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1276\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1277\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1278\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1279\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1280\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1261\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1263\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1264\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1265\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda2/envs/actionGAN/lib/python3.5/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1348\u001b[0m return tf_session.TF_SessionRun_wrapper(\n\u001b[1;32m 1349\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1350\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1351\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1352\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_call_tf_sessionprun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"batch_size = 50\n",
"batch_size = 256\n",
"z_dim = 100\n",
"learning_rate = 0.00025\n",
"beta1 = 0.45\n",
"\n",
"epochs = 500\n",
"print(len(glob('./motionpatch/*.png')))\n",
"celeba_dataset = Dataset( glob('./motionpatch/*.png'))\n",
"epochs = 200\n",
"print(len(glob('./smallone/*.png')))\n",
"celeba_dataset = Dataset( glob('./smallone/*.png'))\n",
"with tf.Graph().as_default():\n",
" train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)"
]
......
This file is too large to display.
......@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
......@@ -20,6 +20,49 @@
" dst = os.path.join(output_location +str(count)+\".png\")\n",
" cv2.imwrite(dst,small)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
" def __init__(self, data_files):\n",
" IMAGE_WIDTH = 25\n",
" IMAGE_HEIGHT = 25\n",
" self.image_mode = 'RGB'\n",
" image_channels = 3\n",
" self.data_files = data_files\n",
" self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n",
" \n",
" def get_image(iself,image_path, width, height, mode):\n",
" image = Image.open(image_path)\n",
" image = Image.im2double(image)\n",
" return np.array(image)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"from PIL import Image\n",
"import numpy as np\n",
"from matplotlib import pyplot\n",
"\n",
"imgloc = './smallone/5004.png'\n",
"image = Image.open(imgloc)\n",
"dst = os.path.join(\"./samples/5004.png\")\n",
"pyplot.imsave(dst,image)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
......
S001C002P005R002A008
S001C002P006R001A008
S001C003P002R001A055
S001C003P002R002A012
S001C003P005R002A004
S001C003P005R002A005
S001C003P005R002A006
S001C003P006R002A008
S002C002P011R002A030
S002C003P008R001A020
S002C003P010R002A010
S002C003P011R002A007
S002C003P011R002A011
S002C003P014R002A007
S003C001P019R001A055
S003C002P002R002A055
S003C002P018R002A055
S003C003P002R001A055
S003C003P016R001A055
S003C003P018R002A024
S004C002P003R001A013
S004C002P008R001A009
S004C002P020R001A003
S004C002P020R001A004
S004C002P020R001A012
S004C002P020R001A020
S004C002P020R001A021
S004C002P020R001A036
S005C002P004R001A001
S005C002P004R001A003
S005C002P010R001A016
S005C002P010R001A017
S005C002P010R001A048
S005C002P010R001A049
S005C002P016R001A009
S005C002P016R001A010
S005C002P018R001A003
S005C002P018R001A028
S005C002P018R001A029
S005C003P016R002A009
S005C003P018R002A013
S005C003P021R002A057
S006C001P001R002A055
S006C002P007R001A005
S006C002P007R001A006
S006C002P016R001A043
S006C002P016R001A051
S006C002P016R001A052
S006C002P022R001A012
S006C002P023R001A020
S006C002P023R001A021
S006C002P023R001A022
S006C002P023R001A023
S006C002P024R001A018
S006C002P024R001A019
S006C003P001R002A013
S006C003P007R002A009
S006C003P007R002A010
S006C003P007R002A025
S006C003P016R001A060
S006C003P017R001A055
S006C003P017R002A013
S006C003P017R002A014
S006C003P017R002A015
S006C003P022R002A013
S007C001P018R002A050
S007C001P025R002A051
S007C001P028R001A050
S007C001P028R001A051
S007C001P028R001A052
S007C002P008R002A008
S007C002P015R002A055
S007C002P026R001A008
S007C002P026R001A009
S007C002P026R001A010
S007C002P026R001A011
S007C002P026R001A012
S007C002P026R001A050
S007C002P027R001A011
S007C002P027R001A013
S007C002P028R002A055
S007C003P007R001A002
S007C003P007R001A004
S007C003P019R001A060
S007C003P027R002A001
S007C003P027R002A002
S007C003P027R002A003
S007C003P027R002A004
S007C003P027R002A005
S007C003P027R002A006
S007C003P027R002A007
S007C003P027R002A008
S007C003P027R002A009
S007C003P027R002A010
S007C003P027R002A011
S007C003P027R002A012
S007C003P027R002A013
S008C002P001R001A009
S008C002P001R001A010
S008C002P001R001A014
S008C002P001R001A015
S008C002P001R001A016
S008C002P001R001A018
S008C002P001R001A019
S008C002P008R002A059
S008C002P025R001A060
S008C002P029R001A004
S008C002P031R001A005
S008C002P031R001A006
S008C002P032R001A018
S008C002P034R001A018
S008C002P034R001A019
S008C002P035R001A059
S008C002P035R002A002
S008C002P035R002A005
S008C003P007R001A009
S008C003P007R001A016
S008C003P007R001A017
S008C003P007R001A018
S008C003P007R001A019
S008C003P007R001A020
S008C003P007R001A021
S008C003P007R001A022
S008C003P007R001A023
S008C003P007R001A025
S008C003P007R001A026
S008C003P007R001A028
S008C003P007R001A029
S008C003P007R002A003
S008C003P008R002A050
S008C003P025R002A002
S008C003P025R002A011
S008C003P025R002A012
S008C003P025R002A016
S008C003P025R002A020
S008C003P025R002A022
S008C003P025R002A023
S008C003P025R002A030
S008C003P025R002A031
S008C003P025R002A032
S008C003P025R002A033
S008C003P025R002A049
S008C003P025R002A060
S008C003P031R001A001
S008C003P031R002A004
S008C003P031R002A014
S008C003P031R002A015
S008C003P031R002A016
S008C003P031R002A017
S008C003P032R002A013
S008C003P033R002A001
S008C003P033R002A011
S008C003P033R002A012
S008C003P034R002A001
S008C003P034R002A012
S008C003P034R002A022
S008C003P034R002A023
S008C003P034R002A024
S008C003P034R002A044
S008C003P034R002A045
S008C003P035R002A016
S008C003P035R002A017
S008C003P035R002A018
S008C003P035R002A019
S008C003P035R002A020
S008C003P035R002A021
S009C002P007R001A001
S009C002P007R001A003
S009C002P007R001A014
S009C002P008R001A014
S009C002P015R002A050
S009C002P016R001A002
S009C002P017R001A028
S009C002P017R001A029
S009C003P017R002A030
S009C003P025R002A054
S010C001P007R002A020
S010C002P016R002A055
S010C002P017R001A005
S010C002P017R001A018
S010C002P017R001A019
S010C002P019R001A001
S010C002P025R001A012
S010C003P007R002A043
S010C003P008R002A003
S010C003P016R001A055
S010C003P017R002A055
S011C001P002R001A008
S011C001P018R002A050
S011C002P008R002A059
S011C002P016R002A055
S011C002P017R001A020
S011C002P017R001A021
S011C002P018R002A055
S011C002P027R001A009
S011C002P027R001A010
S011C002P027R001A037
S011C003P001R001A055
S011C003P002R001A055
S011C003P008R002A012
S011C003P015R001A055
S011C003P016R001A055
S011C003P019R001A055
S011C003P025R001A055
S011C003P028R002A055
S012C001P019R001A060
S012C001P019R002A060
S012C002P015R001A055
S012C002P017R002A012
S012C002P025R001A060
S012C003P008R001A057
S012C003P015R001A055
S012C003P015R002A055
S012C003P016R001A055
S012C003P017R002A055
S012C003P018R001A055
S012C003P018R001A057
S012C003P019R002A011
S012C003P019R002A012
S012C003P025R001A055
S012C003P027R001A055
S012C003P027R002A009
S012C003P028R001A035
S012C003P028R002A055
S013C001P015R001A054
S013C001P017R002A054
S013C001P018R001A016
S013C001P028R001A040
S013C002P015R001A054
S013C002P017R002A054
S013C002P028R001A040
S013C003P008R002A059
S013C003P015R001A054
S013C003P017R002A054
S013C003P025R002A022
S013C003P027R001A055
S013C003P028R001A040
S014C001P027R002A040
S014C002P015R001A003
S014C002P019R001A029
S014C002P025R002A059
S014C002P027R002A040
S014C002P039R001A050
S014C003P007R002A059
S014C003P015R002A055
S014C003P019R002A055
S014C003P025R001A048
S014C003P027R002A040
S015C001P008R002A040
S015C001P016R001A055
S015C001P017R001A055
S015C001P017R002A055
S015C002P007R001A059
S015C002P008R001A003
S015C002P008R001A004
S015C002P008R002A040
S015C002P015R001A002
S015C002P016R001A001
S015C002P016R002A055
S015C003P008R002A007
S015C003P008R002A011
S015C003P008R002A012
S015C003P008R002A028
S015C003P008R002A040
S015C003P025R002A012
S015C003P025R002A017
S015C003P025R002A020
S015C003P025R002A021
S015C003P025R002A030
S015C003P025R002A033
S015C003P025R002A034
S015C003P025R002A036
S015C003P025R002A037
S015C003P025R002A044
S016C001P019R002A040
S016C001P025R001A011
S016C001P025R001A012
S016C001P025R001A060
S016C001P040R001A055
S016C001P040R002A055
S016C002P008R001A011
S016C002P019R002A040
S016C002P025R002A012
S016C003P008R001A011
S016C003P008R002A002
S016C003P008R002A003
S016C003P008R002A004
S016C003P008R002A006
S016C003P008R002A009
S016C003P019R002A040
S016C003P039R002A016
S017C001P016R002A031
S017C002P007R001A013
S017C002P008R001A009
S017C002P015R001A042
S017C002P016R002A031
S017C002P016R002A055
S017C003P007R002A013
S017C003P008R001A059
S017C003P016R002A031
S017C003P017R001A055
S017C003P020R001A059
\ No newline at end of file
%your motion_patch location
ori = imread('/home/rfj/바탕화면/actionGAN/motion_patch/S001C001P001R001A020.png');
ori = imread('/home/rfj/바탕화면/actionGAN/DCGAN/new_motionpatch/sample_111.png');
ori = im2double(ori);
ori = ori(:,:,:);
......
clear;
%missing file delete
%LOCATION : raw skeletone files
path_name = '/media/rfj/EEA4441FA443E923/nturgb_skeletones/';
file_list = dir(path_name);
L = length(file_list);
fileID = fopen('/home/rfj/MATLAB/bin/samples_with_missing_skeletons.txt','r');
fileID = fopen('/home/rfj/바탕화면/actionGAN/skeletone_INDEX/good_stand_2.txt','r');
formatSpec = '%s';
sizeA = [20 Inf];
missing_file_list = fscanf(fileID,formatSpec,sizeA);
missing_file_list = missing_file_list.';
sizeA = [20 Inf];
perfect_list = fscanf(fileID,formatSpec,sizeA);
perfect_list = perfect_list.';
fclose(fileID);
perfect_list = [];
for K = 3:L
file_name = char(file_list(K).name);
missing_num = 0;
for J = 1:length(missing_file_list);
missing_name = missing_file_list(J,:);
if file_name(1:20) == missing_name
missing_num = 1;
end
end
if missing_num == 0
perfect_list = [perfect_list;file_name];
end
end
% make motion patch
L = length(perfect_list);
for K = 1:L
file_name = char(perfect_list(K,:));
name = strcat(path_name,file_name(1:20),'.skeleton');
num_body = file_name(22);
BN = str2num(num_body);
[token,remainder] = strtok(file_name,'A');
class = str2num(remainder(2:4));
if class == 20
bodyinfo = read_skeleton_file(name);
frame_num = size(bodyinfo,2);
bodyinfo = read_skeleton_file(name);
frame_num = size(bodyinfo,2);
try
%initialize
cur_subject_x = zeros(frame_num, 25);
cur_subject_y = zeros(frame_num, 25);
......@@ -60,131 +33,149 @@ for K = 1:L
joint_9 = zeros(1,3);
joint_1 = zeros(1,3);
joint_3 = zeros(1,3);
try
%get total joints information
for FN = 1:frame_num
cur_body = bodyinfo(FN).bodies(1);
joints = cur_body.joints;
%get total joints information
for FN = 1:frame_num
cur_body = bodyinfo(FN).bodies(1);
joints = cur_body.joints;
for JN = 1:25
tot_x(FN,JN) = joints(JN).x;
tot_y(FN,JN) = joints(JN).y;
tot_z(FN,JN) = joints(JN).z;
end
end
%Orientation normalization 1 : in space
%get median values
M_x = median(tot_x);
M_y = median(tot_y);
M_z = median(tot_z);
%set 3 points for make plane
joint_5 = [M_x(5) M_y(5) M_z(5)];
joint_9 = [M_x(9) M_y(9) M_z(9)];
joint_1 = [M_x(1) M_y(1) M_z(1)];
joint_3 = [M_x(3) M_y(3) M_z(3)];
%find RIGID TRNASFORMATION matrix
d1 = joint_1 - joint_5;
d2 = joint_1 - joint_9;
n1 = cross(d1,d2); % because we will parallel transform, don't need to find belly
u1 = n1/norm(n1);
u2 = [0 0 1];
cs1 = dot(u1,u2)/norm(u1)*norm(u2);
ss1 = sqrt(1-cs1.^2);
v1 = cross(u1,u2)/norm(cross(u1,u2));
R1 = [v1(1)*v1(1)*(1-cs1)+cs1 v1(1)*v1(2)*(1-cs1)-v1(3)*ss1 v1(1)*v1(3)*(1-cs1)+v1(2)*ss1];
R1(2,:) = [v1(1)*v1(2)*(1-cs1)+v1(3)*ss1 v1(2)*v1(2)*(1-cs1)+cs1 v1(2)*v1(3)*(1-cs1)-v1(1)*ss1];
R1(3,:) = [v1(1)*v1(3)*(1-cs1)-v1(2)*ss1 v1(2)*v1(3)*(1-cs1)+v1(1)*ss1 v1(3)*v1(3)*(1-cs1)+cs1];
%1-3 number tolls to parallel x axis. Rigid transformation on plane surface
%Z axis coords oyler angle transform
t = joint_3 - joint_1;
d3 = R1(1,:) * t.';
d3(1,2) = R1(2,:) * t.';
d3(1,3) = R1(3,:) * t.';
u3 = d3(1:2)/norm(d3(1:2));
v3 = [u3(1) -u3(2)];
v3(2,:) = [u3(2) u3(1)];
u4 = [1 0].';
csss = v3\u4;
cs2 = csss(1);
ss2 = csss(2);
R2 = [cs2 -ss2 0];
R2(2,:) = [ss2 cs2 0];
R2(3,:) = [0 0 1];
%apply rigid transformation
for FN = 1:frame_num
cur_body = bodyinfo(FN).bodies(1);
joints = cur_body.joints;
for JN = 1:25
a = R1(1,:) * [joints(JN).x joints(JN).y joints(JN).z].';
b = R1(2,:) * [joints(JN).x joints(JN).y joints(JN).z].';
c = R1(3,:) * [joints(JN).x joints(JN).y joints(JN).z].';
cur_subject_x(FN,JN) = R2(1,:) * [a b c].';
cur_subject_y(FN,JN) = R2(2,:) * [a b c].';
cur_subject_z(FN,JN) = R2(3,:) * [a b c].';
for JN = 1:25
tot_x(FN,JN) = joints(JN).x;
tot_y(FN,JN) = joints(JN).y;
tot_z(FN,JN) = joints(JN).z;
end
end
end
%orientation normalize 2 in plane surface
if cur_subject_x(1,4) < cur_subject_x(1,1)
cur_subject_x = 0 - cur_subject_x;
end
if cur_subject_y(1,9) > cur_subject_y(1,5)
cur_subject_y = 0 - cur_subject_y;
end
% for save origin subjects before data augment
clear_subject_x = cur_subject_x;
clear_subject_y = cur_subject_y;
clear_subject_z = cur_subject_z;
% Left <-> Right Change : 2option
for LR = 1:2
if LR == 1
augment_y = clear_subject_y;
else
augment_y = 0 - clear_subject_y;
end
%get median values
M_x = median(tot_x);
M_y = median(tot_y);
M_z = median(tot_z);
%set 3 points for make plane
joint_5 = [M_x(5) M_y(5) M_z(5)];
joint_9 = [M_x(9) M_y(9) M_z(9)];
joint_1 = [M_x(1) M_y(1) M_z(1)];
joint_3 = [M_x(3) M_y(3) M_z(3)];
%find RIGID TRNASFORMATION matrix
d1 = joint_1 - joint_5;
d2 = joint_1 - joint_9;
n1 = cross(d1,d2); % because we will parallel transform, don't need to find belly
u1 = n1/norm(n1);
u2 = [0 0 1];
cs1 = dot(u1,u2)/norm(u1)*norm(u2);
ss1 = sqrt(1-cs1.^2);
v1 = cross(u1,u2)/norm(cross(u1,u2));
R1 = [v1(1)*v1(1)*(1-cs1)+cs1 v1(1)*v1(2)*(1-cs1)-v1(3)*ss1 v1(1)*v1(3)*(1-cs1)+v1(2)*ss1];
R1(2,:) = [v1(1)*v1(2)*(1-cs1)+v1(3)*ss1 v1(2)*v1(2)*(1-cs1)+cs1 v1(2)*v1(3)*(1-cs1)-v1(1)*ss1];
R1(3,:) = [v1(1)*v1(3)*(1-cs1)-v1(2)*ss1 v1(2)*v1(3)*(1-cs1)+v1(1)*ss1 v1(3)*v1(3)*(1-cs1)+cs1];
%1-3 number tolls to parallel x axis. Rigid transformation on plane surface
%Z axis coords oyler angle transform
t = joint_3 - joint_1;
d3 = R1(1,:) * t.';
d3(1,2) = R1(2,:) * t.';
d3(1,3) = R1(3,:) * t.';
u3 = d3(1:2)/norm(d3(1:2));
v3 = [u3(1) -u3(2)];
v3(2,:) = [u3(2) u3(1)];
u4 = [1 0].';
csss = v3\u4;
cs2 = csss(1);
ss2 = csss(2);
R2 = [cs2 -ss2 0];
R2(2,:) = [ss2 cs2 0];
R2(3,:) = [0 0 1];
%apply rigid transformation
for FN = 1:frame_num
cur_body = bodyinfo(FN).bodies(1);
joints = cur_body.joints;
%Height change : 3option
for HE = 1:3
if HE == 1
augment_x = clear_subject_x.* 1.2;
elseif HE==2
augment_x = clear_subject_x.* 1.0;
else
augment_x = clear_subject_x.* 0.8;
end
for JN = 1:25
a = R1(1,:) * [joints(JN).x joints(JN).y joints(JN).z].';
b = R1(2,:) * [joints(JN).x joints(JN).y joints(JN).z].';
c = R1(3,:) * [joints(JN).x joints(JN).y joints(JN).z].';
%Give Gaussian Random Variable : 0.01 - 6times
for RV = 1:6
%3. Gaussian Random filter 0.1
cur_subject_x = augment_x + 0.01.*randn(frame_num,25);
cur_subject_y = augment_y + 0.01.*randn(frame_num,25);
cur_subject_z = clear_subject_z + 0.01.*randn(frame_num,25);
% NORMALIZATION
cur_subject_x = cur_subject_x - min(cur_subject_x(:));
max_tall = max(cur_subject_x(:));
cur_subject_x = cur_subject_x ./ max_tall;
cur_subject_y = cur_subject_y - min(cur_subject_y(:));
cur_subject_y = cur_subject_y ./ max_tall;
cur_subject_z = cur_subject_z - min(cur_subject_z(:));
cur_subject_z = cur_subject_z ./ max_tall;
cur_subject_x(FN,JN) = R2(1,:) * [a b c].';
cur_subject_y(FN,JN) = R2(2,:) * [a b c].';
cur_subject_z(FN,JN) = R2(3,:) * [a b c].';
%Write image
motionpatch = cur_subject_x;
motionpatch(:,:,2) = cur_subject_y;
motionpatch(:,:,3) = cur_subject_z;
new_file_name = strcat('/home/rfj/바탕화면/actionGAN/DCGAN/new_motionpatch/',file_name(1:20),'_',num2str(LR),num2str(HE),num2str(RV),'.png');
imwrite(motionpatch,new_file_name);
end
end
%orientation normalize 2 (with plane surface)
if cur_subject_x(1,4) < cur_subject_x(1,1)
cur_subject_x = 0 - cur_subject_x;
end
if cur_subject_y(1,9) > cur_subject_y(1,5)
cur_subject_y = 0 - cur_subject_y;
end
%get current median
CM_x=median(cur_subject_x);
CM_y=median(cur_subject_y);
CM_z=median(cur_subject_z);
%for transform bellybutton to 0.5,0.5 (Except X) but it doesn't work
belly_button = 0.5 - CM_y(2);
belly_button(2) = 0.5 - CM_z(2);
% normalize with x... <- HERE! WANT TO PARALLEL TRANSFORM
... but if I plus belly_button for x and y axis , it dosn't work
cur_subject_x = cur_subject_x - min(cur_subject_x(:));
max_tall = max(cur_subject_x(:));
cur_subject_x = cur_subject_x ./ max_tall;
cur_subject_y = cur_subject_y - min(cur_subject_y(:));
cur_subject_y = cur_subject_y ./ max_tall;
cur_subject_z = cur_subject_z - min(cur_subject_z(:));
cur_subject_z = cur_subject_z ./ max_tall;
% 이미지 저장
motionpatch = cur_subject_x;
motionpatch(:,:,2) = cur_subject_y;
motionpatch(:,:,3) = cur_subject_z;
new_file_name = strcat('/home/rfj/바탕화면/motionpatch/',num2str(class),'/',file_name(1:20),'.png');
imwrite(motionpatch,new_file_name);
catch
name
end
catch
name
end
end
......
clear;
% Build augmented "motion patch" images (half-size normalization variant).
% For every skeleton file listed in good_stand_2.txt, the sequence of 25
% Kinect joints is orientation-normalized and written as a frames x 25
% RGB image whose R/G/B channels hold the normalized x/y/z coordinates.
% Each sequence is augmented 2 (mirror) x 3 (height scale) x 6 (noise
% draws) = 36 times. Requires read_skeleton_file on the MATLAB path.
path_name = '/media/rfj/EEA4441FA443E923/nturgb_skeletones/';
fileID = fopen('/home/rfj/바탕화면/actionGAN/skeletone_INDEX/good_stand_2.txt','r');
formatSpec = '%s';
sizeA = [20 Inf]; % 20 characters per sample name, as many names as present
perfect_list = fscanf(fileID,formatSpec,sizeA);
perfect_list = perfect_list.'; % one sample name per row
fclose(fileID);
% BUG FIX: length() returns the LARGEST dimension, i.e. the 20-char width
% whenever fewer than 20 samples are listed, which over-runs the list.
% Use the row count explicitly.
L = size(perfect_list,1);
for K = 1:L
    file_name = char(perfect_list(K,:));
    name = strcat(path_name,file_name(1:20),'.skeleton');
    % Action class parsed from the "A###" suffix (currently unused in this
    % script). Renamed from "class", which shadows the MATLAB builtin.
    [~,remainder] = strtok(file_name,'A');
    action_class = str2double(remainder(2:4));
    bodyinfo = read_skeleton_file(name);
    frame_num = size(bodyinfo,2);
    try
        %initialize per-sequence joint buffers (frames x 25 joints)
        cur_subject_x = zeros(frame_num, 25);
        cur_subject_y = zeros(frame_num, 25);
        cur_subject_z = zeros(frame_num, 25);
        tot_x = zeros(frame_num,25);
        tot_y = zeros(frame_num,25);
        tot_z = zeros(frame_num,25);
        joint_5 = zeros(1,3);
        joint_9 = zeros(1,3);
        joint_1 = zeros(1,3);
        joint_3 = zeros(1,3);
        %get total joints information (first tracked body only)
        for FN = 1:frame_num
            cur_body = bodyinfo(FN).bodies(1);
            joints = cur_body.joints;
            for JN = 1:25
                tot_x(FN,JN) = joints(JN).x;
                tot_y(FN,JN) = joints(JN).y;
                tot_z(FN,JN) = joints(JN).z;
            end
        end
        %Orientation normalization 1 : in space
        %get per-joint median positions over the whole sequence
        M_x = median(tot_x);
        M_y = median(tot_y);
        M_z = median(tot_z);
        %set 3 points to define the torso plane
        %(joints 1/3 lie on the spine, 5/9 on the two arms per the joint
        % map used elsewhere in this repo — TODO confirm exact joint ids)
        joint_5 = [M_x(5) M_y(5) M_z(5)];
        joint_9 = [M_x(9) M_y(9) M_z(9)];
        joint_1 = [M_x(1) M_y(1) M_z(1)];
        joint_3 = [M_x(3) M_y(3) M_z(3)];
        %find RIGID TRANSFORMATION matrix rotating the torso-plane normal
        %onto the z axis (Rodrigues rotation formula)
        d1 = joint_1 - joint_5;
        d2 = joint_1 - joint_9;
        n1 = cross(d1,d2); % because we will parallel transform, don't need to find belly
        u1 = n1/norm(n1);
        u2 = [0 0 1];
        % NOTE(review): intended cosine is dot(u1,u2)/(norm(u1)*norm(u2));
        % precedence evaluates this as (dot/norm(u1))*norm(u2), identical
        % here only because u1 and u2 are both unit vectors.
        cs1 = dot(u1,u2)/norm(u1)*norm(u2);
        ss1 = sqrt(1-cs1.^2);
        v1 = cross(u1,u2)/norm(cross(u1,u2));
        R1 = [v1(1)*v1(1)*(1-cs1)+cs1 v1(1)*v1(2)*(1-cs1)-v1(3)*ss1 v1(1)*v1(3)*(1-cs1)+v1(2)*ss1];
        R1(2,:) = [v1(1)*v1(2)*(1-cs1)+v1(3)*ss1 v1(2)*v1(2)*(1-cs1)+cs1 v1(2)*v1(3)*(1-cs1)-v1(1)*ss1];
        R1(3,:) = [v1(1)*v1(3)*(1-cs1)-v1(2)*ss1 v1(2)*v1(3)*(1-cs1)+v1(1)*ss1 v1(3)*v1(3)*(1-cs1)+cs1];
        %rotate the 1->3 spine segment parallel to the x axis:
        %Euler-angle rotation about the z axis, after applying R1
        t = joint_3 - joint_1;
        d3 = R1(1,:) * t.';
        d3(1,2) = R1(2,:) * t.';
        d3(1,3) = R1(3,:) * t.';
        u3 = d3(1:2)/norm(d3(1:2));
        v3 = [u3(1) -u3(2)];
        v3(2,:) = [u3(2) u3(1)];
        u4 = [1 0].';
        csss = v3\u4; % solve for [cos; sin] of the in-plane rotation
        cs2 = csss(1);
        ss2 = csss(2);
        R2 = [cs2 -ss2 0];
        R2(2,:) = [ss2 cs2 0];
        R2(3,:) = [0 0 1];
        %apply rigid transformation R2*R1 to every joint of every frame
        for FN = 1:frame_num
            cur_body = bodyinfo(FN).bodies(1);
            joints = cur_body.joints;
            for JN = 1:25
                a = R1(1,:) * [joints(JN).x joints(JN).y joints(JN).z].';
                b = R1(2,:) * [joints(JN).x joints(JN).y joints(JN).z].';
                c = R1(3,:) * [joints(JN).x joints(JN).y joints(JN).z].';
                cur_subject_x(FN,JN) = R2(1,:) * [a b c].';
                cur_subject_y(FN,JN) = R2(2,:) * [a b c].';
                cur_subject_z(FN,JN) = R2(3,:) * [a b c].';
            end
        end
        %orientation normalize 2 in plane surface: flip axes so every
        %skeleton faces the same way in the first frame
        if cur_subject_x(1,4) < cur_subject_x(1,1)
            cur_subject_x = 0 - cur_subject_x;
        end
        if cur_subject_y(1,9) > cur_subject_y(1,5)
            cur_subject_y = 0 - cur_subject_y;
        end
        %keep clean copies so every augmentation starts from the original
        clear_subject_x = cur_subject_x;
        clear_subject_y = cur_subject_y;
        clear_subject_z = cur_subject_z;
        %Left <-> Right mirror : 2 options
        for LR = 1:2
            if LR == 1
                augment_y = clear_subject_y;
            else
                augment_y = 0 - clear_subject_y;
            end
            %Height scale : 3 options
            for HE = 1:3
                if HE == 1
                    augment_x = clear_subject_x.* 1.2;
                elseif HE==2
                    augment_x = clear_subject_x.* 1.0;
                else
                    augment_x = clear_subject_x.* 0.8;
                end
                %Gaussian noise (sigma 0.01) : 6 independent draws
                for RV = 1:6
                    cur_subject_x = augment_x + 0.01.*randn(frame_num,25);
                    cur_subject_y = augment_y + 0.01.*randn(frame_num,25);
                    cur_subject_z = clear_subject_z + 0.01.*randn(frame_num,25);
                    %NORMALIZATION: shift each channel to start at 0, then
                    %divide all three by twice the x-range, so x lands in
                    %[0,0.5] ("half size" variant); y/z share the scale
                    cur_subject_x = cur_subject_x - min(cur_subject_x(:));
                    max_tall = max(cur_subject_x(:)) .*2;
                    cur_subject_x = cur_subject_x ./ max_tall;
                    cur_subject_y = cur_subject_y - min(cur_subject_y(:));
                    cur_subject_y = cur_subject_y ./ max_tall;
                    cur_subject_z = cur_subject_z - min(cur_subject_z(:));
                    cur_subject_z = cur_subject_z ./ max_tall;
                    %Write image: channel 1/2/3 = x/y/z
                    motionpatch = cur_subject_x;
                    motionpatch(:,:,2) = cur_subject_y;
                    motionpatch(:,:,3) = cur_subject_z;
                    new_file_name = strcat('/home/rfj/바탕화면/actionGAN/DCGAN/new_motionpatch_halfsize/',file_name(1:20),'_',num2str(LR),num2str(HE),num2str(RV),'.png');
                    imwrite(motionpatch,new_file_name);
                end
            end
        end
    catch
        %skeleton could not be read/processed; report the file and move on
        name
    end
end
clear;
% Build augmented "motion patch" images (rotate-90 variant).
% Identical pipeline to the half-size script, except the in-plane target
% axis is y instead of x (u4 = [0 1].'), so the x/y roles are swapped in
% the flip checks, the mirror/height augmentations, and the final
% normalization. Output goes to new_motionpatch_rotate90/.
% Requires read_skeleton_file on the MATLAB path.
path_name = '/media/rfj/EEA4441FA443E923/nturgb_skeletones/';
fileID = fopen('/home/rfj/바탕화면/actionGAN/skeletone_INDEX/good_stand_2.txt','r');
formatSpec = '%s';
sizeA = [20 Inf]; % 20 characters per sample name, as many names as present
perfect_list = fscanf(fileID,formatSpec,sizeA);
perfect_list = perfect_list.'; % one sample name per row
fclose(fileID);
% BUG FIX: length() returns the LARGEST dimension, i.e. the 20-char width
% whenever fewer than 20 samples are listed, which over-runs the list.
% Use the row count explicitly.
L = size(perfect_list,1);
for K = 1:L
    file_name = char(perfect_list(K,:));
    name = strcat(path_name,file_name(1:20),'.skeleton');
    % Action class parsed from the "A###" suffix (currently unused in this
    % script). Renamed from "class", which shadows the MATLAB builtin.
    [~,remainder] = strtok(file_name,'A');
    action_class = str2double(remainder(2:4));
    bodyinfo = read_skeleton_file(name);
    frame_num = size(bodyinfo,2);
    try
        %initialize per-sequence joint buffers (frames x 25 joints)
        cur_subject_x = zeros(frame_num, 25);
        cur_subject_y = zeros(frame_num, 25);
        cur_subject_z = zeros(frame_num, 25);
        tot_x = zeros(frame_num,25);
        tot_y = zeros(frame_num,25);
        tot_z = zeros(frame_num,25);
        joint_5 = zeros(1,3);
        joint_9 = zeros(1,3);
        joint_1 = zeros(1,3);
        joint_3 = zeros(1,3);
        %get total joints information (first tracked body only)
        for FN = 1:frame_num
            cur_body = bodyinfo(FN).bodies(1);
            joints = cur_body.joints;
            for JN = 1:25
                tot_x(FN,JN) = joints(JN).x;
                tot_y(FN,JN) = joints(JN).y;
                tot_z(FN,JN) = joints(JN).z;
            end
        end
        %Orientation normalization 1 : in space
        %get per-joint median positions over the whole sequence
        M_x = median(tot_x);
        M_y = median(tot_y);
        M_z = median(tot_z);
        %set 3 points to define the torso plane
        %(joints 1/3 lie on the spine, 5/9 on the two arms per the joint
        % map used elsewhere in this repo — TODO confirm exact joint ids)
        joint_5 = [M_x(5) M_y(5) M_z(5)];
        joint_9 = [M_x(9) M_y(9) M_z(9)];
        joint_1 = [M_x(1) M_y(1) M_z(1)];
        joint_3 = [M_x(3) M_y(3) M_z(3)];
        %find RIGID TRANSFORMATION matrix rotating the torso-plane normal
        %onto the z axis (Rodrigues rotation formula)
        d1 = joint_1 - joint_5;
        d2 = joint_1 - joint_9;
        n1 = cross(d1,d2); % because we will parallel transform, don't need to find belly
        u1 = n1/norm(n1);
        u2 = [0 0 1];
        % NOTE(review): intended cosine is dot(u1,u2)/(norm(u1)*norm(u2));
        % precedence evaluates this as (dot/norm(u1))*norm(u2), identical
        % here only because u1 and u2 are both unit vectors.
        cs1 = dot(u1,u2)/norm(u1)*norm(u2);
        ss1 = sqrt(1-cs1.^2);
        v1 = cross(u1,u2)/norm(cross(u1,u2));
        R1 = [v1(1)*v1(1)*(1-cs1)+cs1 v1(1)*v1(2)*(1-cs1)-v1(3)*ss1 v1(1)*v1(3)*(1-cs1)+v1(2)*ss1];
        R1(2,:) = [v1(1)*v1(2)*(1-cs1)+v1(3)*ss1 v1(2)*v1(2)*(1-cs1)+cs1 v1(2)*v1(3)*(1-cs1)-v1(1)*ss1];
        R1(3,:) = [v1(1)*v1(3)*(1-cs1)-v1(2)*ss1 v1(2)*v1(3)*(1-cs1)+v1(1)*ss1 v1(3)*v1(3)*(1-cs1)+cs1];
        %rotate the 1->3 spine segment onto the in-plane target axis:
        %Euler-angle rotation about the z axis, after applying R1
        t = joint_3 - joint_1;
        d3 = R1(1,:) * t.';
        d3(1,2) = R1(2,:) * t.';
        d3(1,3) = R1(3,:) * t.';
        u3 = d3(1:2)/norm(d3(1:2));
        v3 = [u3(1) -u3(2)];
        v3(2,:) = [u3(2) u3(1)];
        u4 = [0 1].'; % decide orientation in plane: spine ends up along +y
        csss = v3\u4; % solve for [cos; sin] of the in-plane rotation
        cs2 = csss(1);
        ss2 = csss(2);
        R2 = [cs2 -ss2 0];
        R2(2,:) = [ss2 cs2 0];
        R2(3,:) = [0 0 1];
        %apply rigid transformation R2*R1 to every joint of every frame
        for FN = 1:frame_num
            cur_body = bodyinfo(FN).bodies(1);
            joints = cur_body.joints;
            for JN = 1:25
                a = R1(1,:) * [joints(JN).x joints(JN).y joints(JN).z].';
                b = R1(2,:) * [joints(JN).x joints(JN).y joints(JN).z].';
                c = R1(3,:) * [joints(JN).x joints(JN).y joints(JN).z].';
                cur_subject_x(FN,JN) = R2(1,:) * [a b c].';
                cur_subject_y(FN,JN) = R2(2,:) * [a b c].';
                cur_subject_z(FN,JN) = R2(3,:) * [a b c].';
            end
        end
        %orientation normalize 2 (with plane surface): flip axes so every
        %skeleton faces the same way in the first frame (x/y swapped
        %relative to the half-size script because of the 90-degree target)
        if cur_subject_y(1,4) < cur_subject_y(1,1)
            cur_subject_y = 0 - cur_subject_y;
        end
        if cur_subject_x(1,9) > cur_subject_x(1,5)
            cur_subject_x = 0 - cur_subject_x;
        end
        %keep clean copies so every augmentation starts from the original
        clear_subject_x = cur_subject_x;
        clear_subject_y = cur_subject_y;
        clear_subject_z = cur_subject_z;
        %Left <-> Right mirror : 2 options
        for LR = 1:2
            if LR == 1
                augment_x = clear_subject_x;
            else
                augment_x = 0 - clear_subject_x;
            end
            %Height scale : 3 options
            for HE = 1:3
                if HE == 1
                    augment_y = clear_subject_y.* 1.2;
                elseif HE==2
                    augment_y = clear_subject_y.* 1.0;
                else
                    augment_y = clear_subject_y.* 0.8;
                end
                %Gaussian noise (sigma 0.01) : 6 independent draws
                for RV = 1:6
                    cur_subject_x = augment_x + 0.01.*randn(frame_num,25);
                    cur_subject_y = augment_y + 0.01.*randn(frame_num,25);
                    cur_subject_z = clear_subject_z + 0.01.*randn(frame_num,25);
                    %NORMALIZATION: shift each channel to start at 0, then
                    %divide all three by the y-range (body height in this
                    %rotated frame), so y lands in [0,1]; x/z share the scale
                    cur_subject_y = cur_subject_y - min(cur_subject_y(:));
                    max_tall = max(cur_subject_y(:));
                    cur_subject_y = cur_subject_y ./ max_tall;
                    cur_subject_x = cur_subject_x - min(cur_subject_x(:));
                    cur_subject_x = cur_subject_x ./ max_tall;
                    cur_subject_z = cur_subject_z - min(cur_subject_z(:));
                    cur_subject_z = cur_subject_z ./ max_tall;
                    %Write image: channel 1/2/3 = x/y/z
                    motionpatch = cur_subject_x;
                    motionpatch(:,:,2) = cur_subject_y;
                    motionpatch(:,:,3) = cur_subject_z;
                    new_file_name = strcat('/home/rfj/바탕화면/actionGAN/DCGAN/new_motionpatch_rotate90/',file_name(1:20),'_',num2str(LR),num2str(HE),num2str(RV),'.png');
                    imwrite(motionpatch,new_file_name);
                end
            end
        end
    catch
        %skeleton could not be read/processed; report the file and move on
        name
    end
end
......@@ -7,7 +7,7 @@
clear;
name = '/home/rfj/바탕화면/skeletones/S001C001P002R002A020.skeleton'
name = '/home/rfj/바탕화면/actionGAN/sample_skeletones/S001C001P001R002A020.skeleton'
bodyinfo = read_skeleton_file(name);
frame_num = size(bodyinfo,2);
......@@ -37,6 +37,7 @@ for FN = 1:frame_num
end
end
%Orientation normalization 1 : in space
%get median values
M_x = median(tot_x);
M_y = median(tot_y);
......@@ -109,89 +110,59 @@ end
if cur_subject_y(1,9) > cur_subject_y(1,5)
cur_subject_y = 0 - cur_subject_y;
end
%get current median
CM_x=median(cur_subject_x);
CM_y=median(cur_subject_y);
CM_z=median(cur_subject_z);
%for transform bellybutton to 0.5,0.5 (Except X) but it doesn't work
belly_button = 0.5 - CM_y(2);
belly_button(2) = 0.5 - CM_z(2);
% normalize with x... <- HERE! WANT TO PARALLEL TRANSFORM
... but if I plus belly_button for x and y axis , it dosn't work
cur_subject_x = cur_subject_x - min(cur_subject_x(:));
max_tall = max(cur_subject_x(:));
cur_subject_x = cur_subject_x ./ max_tall;
cur_subject_y = cur_subject_y - min(cur_subject_y(:));
cur_subject_y = cur_subject_y ./ max_tall;
cur_subject_z = cur_subject_z - min(cur_subject_z(:));
cur_subject_z = cur_subject_z ./ max_tall;
% 이미지 저장
motionpatch = cur_subject_x;
motionpatch(:,:,2) = cur_subject_y;
motionpatch(:,:,3) = cur_subject_z;
new_file_name = strcat('/home/rfj/바탕화면/sample.png');
imwrite(motionpatch,new_file_name);
% read image after write
ori = imread('/home/rfj/바탕화면/sample.png');
ori = im2double(ori);
ori = ori(:,:,:);
dx = [];
dy = [];
dz = [];
for f = 1:numel(ori(:,1,1))
for j = 1:25
dx = [dx;ori(f,j,1)];
dy = [dy;ori(f,j,2)];
dz = [dz;ori(f,j,3)];
% for save origin subjects before data augment
clear_subject_x = cur_subject_x;
clear_subject_y = cur_subject_y;
clear_subject_z = cur_subject_z;
% Left <-> Right Change : 2option
for LR = 1:2
if LR == 1
augment_y = clear_subject_y;
else
augment_y = 0 - clear_subject_y;
end
end
a = [1 0 0]; % Red 척추 1,2,3,4,20
b = [0 0 1]; % Blue 오른팔 8,9,10,11,23,24
c = [0 1 0]; % Green왼팔 5,6,7,21,22 (여기서 5번이 빠짐. 넣고싶으면 나중에 24 joint가 아니라 25 joint로 추가)
d = [1 1 0]; % Yellow 오른다리 16,17,18,19
e = [0 1 1]; % Skyblue 왼다리 12,13,14,15
colors = [a;a;a;a;c;c;c;c;b;b;b;b;e;e;e;e;d;d;d;d;a;c;c;b;b];
scatter3(dx,dy,dz,100,'filled');
connecting_joints= ...
[2 1 21 3 21 5 6 7 21 9 10 11 1 13 14 15 1 17 18 19 2 8 8 12 12];
for jj=1:25:numel(dx)% 1부터 8개씩 numel = 열갯수..?
current = [];
current(:,1) = dy(jj:jj+24) ;
current(:,2) = dz(jj:jj+24) ;
current(:,3) = dx(jj:jj+24) ;
scatter3(current(:,1),current(:,2),current(:,3),100,colors(:,:),'filled');
for j =1:25
k=connecting_joints(j);
line([current(j,1) current(k,1)], [current(j,2) current(k,2)] , [current(j,3) current(k,3)])
%Height change : 3option
for HE = 1:3
if HE == 1
augment_x = clear_subject_x.* 1.2;
elseif HE==2
augment_x = clear_subject_x.* 1.0;
else
augment_x = clear_subject_x.* 0.8;
end
%Give Gaussian Random Variable : 0.01 - 6times
for RV = 1:6
%3. Gaussian Random filter 0.1
cur_subject_x = augment_x + 0.01.*randn(frame_num,25);
cur_subject_y = augment_y + 0.01.*randn(frame_num,25);
cur_subject_z = clear_subject_z + 0.01.*randn(frame_num,25);
% NORMALIZATION
cur_subject_x = cur_subject_x - min(cur_subject_x(:));
max_tall = max(cur_subject_x(:));
cur_subject_x = cur_subject_x ./ max_tall;
cur_subject_y = cur_subject_y - min(cur_subject_y(:));
cur_subject_y = cur_subject_y ./ max_tall;
cur_subject_z = cur_subject_z - min(cur_subject_z(:));
cur_subject_z = cur_subject_z ./ max_tall;
%Write image
motionpatch = cur_subject_x;
motionpatch(:,:,2) = cur_subject_y;
motionpatch(:,:,3) = cur_subject_z;
new_file_name = strcat('/home/rfj/바탕화면/actionGAN/DCGAN/new_motionpatch/sample_',num2str(LR),num2str(HE),num2str(RV),'.png');
imwrite(motionpatch,new_file_name);
end
end
set(gca,'Xdir','reverse','Ydir','reverse')
xlim([0 1]);
xlabel('x')
ylim([0 1]);
ylabel('y')
zlim([0 1]);
zlabel('z')
drawnow
pause(0.01)
end
\ No newline at end of file
end
......
This file is too large to display.
This file is too large to display.
This file is too large to display.
This file is too large to display.
......@@ -4,7 +4,6 @@ S001C001P005R002A020
S001C001P007R001A020
S001C001P008R002A020
S001C002P002R002A020
S001C001P001R001A020
S001C002P003R002A020
S001C002P005R001A020
S001C002P005R002A020
......