yunjey

download mnist and resize to 32x32

1 -import tensorflow as tf
2 -
3 -# compatibility aliases for TensorFlow 0.11 and 0.12
4 -try:
5 -    # tensorflow 0.12 version
6 -    image_summary = tf.summary.image
7 -    scalar_summary = tf.summary.scalar
8 -    histogram_summary = tf.summary.histogram
9 -    merge_summary = tf.summary.merge_all
10 -    SummaryWriter = tf.summary.FileWriter
11 -except AttributeError:
12 -    # tensorflow <= 0.11 version
13 -    image_summary = tf.image_summary
14 -    scalar_summary = tf.scalar_summary
15 -    histogram_summary = tf.histogram_summary
16 -    merge_summary = tf.merge_all_summaries
17 -    SummaryWriter = tf.train.SummaryWriter
\ No newline at end of file
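
Note on the removed compatibility shim above: the try/except picks whichever summary API exists, so the rest of the code can call one set of names on both TensorFlow 0.11 and 0.12. A minimal usage sketch (my own illustration, not part of the commit; it assumes the aliases defined above are in scope and uses a stand-in tensor):

import tensorflow as tf
# assumes scalar_summary, merge_summary, SummaryWriter are the aliases defined above
d_loss = tf.constant(0.0, name='d_loss')                      # stand-in scalar for illustration
loss_sum = scalar_summary('d_loss', d_loss)                   # same call under 0.11 and 0.12
summary_op = merge_summary()                                  # merges every collected summary
writer = SummaryWriter('logs', graph=tf.get_default_graph())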
1 -import tensorflow as tf
2 -
3 -
4 -class batch_norm(object):
5 - """Computes batch normalization operation
6 -
7 - Args:
8 - x: input tensor of shape (batch_size, width, height, channels_in) or (batch_size, dim_in)
9 - train: True or False; At train mode, it normalizes the input with mini-batch statistics
10 - At test mode, it normalizes the input with the moving averages and variances
11 -
12 - Returns:
13 - out: batch normalized output of the same shape with x
14 - """
15 - def __init__(self, name):
16 - self.name = name
17 -
18 - def __call__(self, x, train=True):
19 - out = tf.contrib.layers.batch_norm(x, decay=0.99, center=True, scale=True, activation_fn=None,
20 - updates_collections=None, is_training=train, scope=self.name)
21 - return out
22 -
23 -
24 -def conv2d(x, channel_out, k_w=5, k_h=5, s_w=2, s_h=2, name=None):
25 - """Computes convolution operation
26 -
27 - Args:
28 - x: input tensor of shape (batch_size, width_in, heigth_in, channel_in)
29 - channel_out: number of channel for output tensor
30 - k_w: kernel width size; default is 5
31 - k_h: kernel height size; default is 5
32 - s_w: stride size for width; default is 2
33 - s_h: stride size for heigth; default is 2
34 -
35 - Returns:
36 - out: output tensor of shape (batch_size, width_out, height_out, channel_out)
37 - """
38 - channel_in = x.get_shape()[-1]
39 -
40 - with tf.variable_scope(name):
41 - w = tf.get_variable('w', shape=[k_w, k_h, channel_in, channel_out],
42 - initializer=tf.contrib.layers.xavier_initializer())
43 - b = tf.get_variable('b', shape=[channel_out], initializer=tf.constant_initializer(0.0))
44 -
45 - out = tf.nn.conv2d(x, w, strides=[1, s_w, s_h, 1], padding='SAME') + b
46 -
47 - return out
48 -
49 -
50 -def deconv2d(x, output_shape, k_w=5, k_h=5, s_w=2, s_h=2, name=None):
51 - """Computes deconvolution operation
52 -
53 - Args:
54 - x: input tensor of shape (batch_size, width_in, height_in, channel_in)
55 - output_shape: list corresponding to [batch_size, width_out, height_out, channel_out]
56 - k_w: kernel width size; default is 5
57 - k_h: kernel height size; default is 5
58 - s_w: stride size for width; default is 2
59 - s_h: stride size for heigth; default is 2
60 -
61 - Returns:
62 - out: output tensor of shape (batch_size, width_out, hegith_out, channel_out)
63 - """
64 - channel_in = x.get_shape()[-1]
65 - channel_out = output_shape[-1]
66 -
67 -
68 - with tf.variable_scope(name):
69 - w = tf.get_variable('w', shape=[k_w, k_h, channel_out, channel_in],
70 - initializer=tf.contrib.layers.xavier_initializer())
71 - b = tf.get_variable('b', shape=[channel_out], initializer=tf.constant_initializer(0.0))
72 -
73 - out = tf.nn.conv2d_transpose(x, filter=w, output_shape=output_shape, strides=[1, s_w, s_h, 1]) + b
74 -
75 - return out
76 -
77 -def linear(x, dim_out, name=None):
78 - """Computes linear transform (fully-connected layer)
79 -
80 - Args:
81 - x: input tensor of shape (batch_size, dim_in)
82 - dim_out: dimension for output tensor
83 -
84 - Returns:
85 - out: output tensor of shape (batch_size, dim_out)
86 - """
87 - dim_in = x.get_shape()[-1]
88 -
89 - with tf.variable_scope(name):
90 - w = tf.get_variable('w', shape=[dim_in, dim_out], initializer=tf.contrib.layers.xavier_initializer())
91 - b = tf.get_variable('b', shape=[dim_out], initializer=tf.constant_initializer(0.0))
92 -
93 - out = tf.matmul(x, w) + b
94 -
95 - return out
96 -
97 -
98 -def relu(x):
99 -    return tf.nn.relu(x)
100 -
101 -
102 -def lrelu(x, leak=0.2):
103 -    return tf.maximum(x, leak*x)
\ No newline at end of file
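
For reference, a sketch of how the removed helpers compose into a small convolutional stack (my own example, not code from this repo; it assumes conv2d, lrelu, batch_norm, and linear from the file above are importable):

import tensorflow as tf
# assumes conv2d, lrelu, batch_norm, linear are the helpers defined above
images = tf.placeholder(tf.float32, [64, 32, 32, 1])                    # batch of 32x32 grayscale images
h1 = lrelu(conv2d(images, channel_out=64, name='conv1'))                # SAME padding, stride 2 -> (64, 16, 16, 64)
bn1 = batch_norm(name='bn1')
h2 = lrelu(bn1(conv2d(h1, channel_out=128, name='conv2'), train=True))  # -> (64, 8, 8, 128)
logits = linear(tf.reshape(h2, [64, -1]), dim_out=1, name='fc')         # -> (64, 1)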
1 +import numpy as np
2 +import pickle
3 +from PIL import Image
4 +from tensorflow.examples.tutorials.mnist import input_data
5 +
6 +
7 +def resize_images(image_arrays, size=[32, 32]):
8 +    # convert float images in [0, 1] to uint8 in [0, 255]
9 +    image_arrays = (image_arrays * 255).astype('uint8')
10 +
11 +    resized_image_arrays = np.zeros([image_arrays.shape[0]] + size)
12 +    for i, image_array in enumerate(image_arrays):
13 +        image = Image.fromarray(image_array)
14 +        resized_image = image.resize(size=size, resample=Image.ANTIALIAS)
15 +
16 +        resized_image_arrays[i] = np.asarray(resized_image)
17 +
18 +    return np.expand_dims(resized_image_arrays, 3)
19 +
20 +def save_pickle(data, path):
21 +    with open(path, 'wb') as f:
22 +        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
23 +        print('Saved %s..' % path)
24 +
25 +def main():
26 +    mnist = input_data.read_data_sets(train_dir='mnist')
27 +
28 +    train = {'X': resize_images(mnist.train.images.reshape(-1, 28, 28)),
29 +             'y': mnist.train.labels}
30 +
31 +    test = {'X': resize_images(mnist.test.images.reshape(-1, 28, 28)),
32 +            'y': mnist.test.labels}
33 +
34 +    save_pickle(train, 'mnist/train.pkl')
35 +    save_pickle(test, 'mnist/test.pkl')
36 +
37 +
38 +if __name__ == "__main__":
39 +    main()
40 +
\ No newline at end of file
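
A quick way to check the output of the added preprocessing script (my own sketch; the expected shapes assume the standard 55,000/10,000 train/test split produced by input_data.read_data_sets):

import pickle

# load the pickle written by the script above and inspect its shapes
with open('mnist/train.pkl', 'rb') as f:
    train = pickle.load(f)

print(train['X'].shape)   # expected (55000, 32, 32, 1): resized images
print(train['y'].shape)   # expected (55000,): integer class labels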