Showing 13 changed files with 219 additions and 0 deletions
from kafka import KafkaProducer
import cv2


topic = "testing"  # topic to publish to
producer = KafkaProducer(bootstrap_servers='')  # enter the Kafka bootstrap server address

video_file = ""  # input video file
video = cv2.VideoCapture(video_file)

# Read the video frame by frame, JPEG-encode each frame and publish it to Kafka.
while video.isOpened():
    success, frame = video.read()
    if not success:
        break
    _, img = cv2.imencode('.jpg', frame)
    producer.send(topic, img.tobytes())

producer.flush()  # make sure all buffered frames are actually sent
video.release()
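Not part of the diff: a quick way to check that JPEG frames are actually reaching the topic is a plain kafka-python consumer. The sketch below is an assumption for verification only (it reuses the same placeholder broker and topic) and mirrors the decode step of the Spark job further down.

from kafka import KafkaConsumer
import cv2
import numpy as np

consumer = KafkaConsumer("testing", bootstrap_servers='')  # same placeholder broker as above
for message in consumer:
    buf = np.frombuffer(message.value, dtype=np.uint8)
    frame = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    print(frame.shape)  # one decoded BGR frame per Kafka message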
1 | +{"class_name": "Model", "backend": "tensorflow", "keras_version": "2.3.1", "config": {"name": "model_1", "input_layers": [["conv1_input", 0, 0]], "layers": [{"name": "conv1_input", "inbound_nodes": [], "class_name": "InputLayer", "config": {"name": "conv1_input", "sparse": false, "batch_input_shape": [null, 16, 112, 112, 3], "dtype": "float32"}}, {"name": "conv1", "inbound_nodes": [[["conv1_input", 0, 0, {}]]], "class_name": "Conv3D", "config": {"bias_regularizer": null, "padding": "same", "dilation_rate": [1, 1, 1], "kernel_size": [3, 3, 3], "kernel_initializer": {"class_name": "VarianceScaling", "config": {"mode": "fan_avg", "seed": null, "distribution": "uniform", "scale": 1.0}}, "dtype": "float32", "name": "conv1", "filters": 64, "activation": "relu", "batch_input_shape": [null, 16, 112, 112, 3], "kernel_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "data_format": "channels_last", "activity_regularizer": null, "kernel_regularizer": null, "bias_constraint": null, "trainable": true, "use_bias": true, "strides": [1, 1, 1]}}, {"name": "pool1", "inbound_nodes": [[["conv1", 0, 0, {}]]], "class_name": "MaxPooling3D", "config": {"padding": "same", "strides": [1, 2, 2], "dtype": "float32", "trainable": true, "name": "pool1", "data_format": "channels_last", "pool_size": [1, 2, 2]}}, {"name": "conv2", "inbound_nodes": [[["pool1", 0, 0, {}]]], "class_name": "Conv3D", "config": {"bias_regularizer": null, "padding": "same", "dilation_rate": [1, 1, 1], "kernel_size": [3, 3, 3], "kernel_initializer": {"class_name": "VarianceScaling", "config": {"mode": "fan_avg", "seed": null, "distribution": "uniform", "scale": 1.0}}, "dtype": "float32", "name": "conv2", "filters": 128, "activation": "relu", "kernel_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "data_format": "channels_last", "activity_regularizer": null, "kernel_regularizer": null, "bias_constraint": null, "trainable": true, "use_bias": true, "strides": [1, 1, 1]}}, {"name": "pool2", "inbound_nodes": [[["conv2", 0, 0, {}]]], "class_name": "MaxPooling3D", "config": {"padding": "valid", "strides": [2, 2, 2], "dtype": "float32", "trainable": true, "name": "pool2", "data_format": "channels_last", "pool_size": [2, 2, 2]}}, {"name": "conv3a", "inbound_nodes": [[["pool2", 0, 0, {}]]], "class_name": "Conv3D", "config": {"bias_regularizer": null, "padding": "same", "dilation_rate": [1, 1, 1], "kernel_size": [3, 3, 3], "kernel_initializer": {"class_name": "VarianceScaling", "config": {"mode": "fan_avg", "seed": null, "distribution": "uniform", "scale": 1.0}}, "dtype": "float32", "name": "conv3a", "filters": 256, "activation": "relu", "kernel_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "data_format": "channels_last", "activity_regularizer": null, "kernel_regularizer": null, "bias_constraint": null, "trainable": true, "use_bias": true, "strides": [1, 1, 1]}}, {"name": "conv3b", "inbound_nodes": [[["conv3a", 0, 0, {}]]], "class_name": "Conv3D", "config": {"bias_regularizer": null, "padding": "same", "dilation_rate": [1, 1, 1], "kernel_size": [3, 3, 3], "kernel_initializer": {"class_name": "VarianceScaling", "config": {"mode": "fan_avg", "seed": null, "distribution": "uniform", "scale": 1.0}}, "dtype": "float32", "name": "conv3b", "filters": 256, "activation": "relu", "kernel_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "data_format": "channels_last", "activity_regularizer": null, "kernel_regularizer": null, "bias_constraint": 
null, "trainable": true, "use_bias": true, "strides": [1, 1, 1]}}, {"name": "pool3", "inbound_nodes": [[["conv3b", 0, 0, {}]]], "class_name": "MaxPooling3D", "config": {"padding": "valid", "strides": [2, 2, 2], "dtype": "float32", "trainable": true, "name": "pool3", "data_format": "channels_last", "pool_size": [2, 2, 2]}}, {"name": "conv4a", "inbound_nodes": [[["pool3", 0, 0, {}]]], "class_name": "Conv3D", "config": {"bias_regularizer": null, "padding": "same", "dilation_rate": [1, 1, 1], "kernel_size": [3, 3, 3], "kernel_initializer": {"class_name": "VarianceScaling", "config": {"mode": "fan_avg", "seed": null, "distribution": "uniform", "scale": 1.0}}, "dtype": "float32", "name": "conv4a", "filters": 512, "activation": "relu", "kernel_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "data_format": "channels_last", "activity_regularizer": null, "kernel_regularizer": null, "bias_constraint": null, "trainable": true, "use_bias": true, "strides": [1, 1, 1]}}, {"name": "conv4b", "inbound_nodes": [[["conv4a", 0, 0, {}]]], "class_name": "Conv3D", "config": {"bias_regularizer": null, "padding": "same", "dilation_rate": [1, 1, 1], "kernel_size": [3, 3, 3], "kernel_initializer": {"class_name": "VarianceScaling", "config": {"mode": "fan_avg", "seed": null, "distribution": "uniform", "scale": 1.0}}, "dtype": "float32", "name": "conv4b", "filters": 512, "activation": "relu", "kernel_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "data_format": "channels_last", "activity_regularizer": null, "kernel_regularizer": null, "bias_constraint": null, "trainable": true, "use_bias": true, "strides": [1, 1, 1]}}, {"name": "pool4", "inbound_nodes": [[["conv4b", 0, 0, {}]]], "class_name": "MaxPooling3D", "config": {"padding": "valid", "strides": [2, 2, 2], "dtype": "float32", "trainable": true, "name": "pool4", "data_format": "channels_last", "pool_size": [2, 2, 2]}}, {"name": "conv5a", "inbound_nodes": [[["pool4", 0, 0, {}]]], "class_name": "Conv3D", "config": {"bias_regularizer": null, "padding": "same", "dilation_rate": [1, 1, 1], "kernel_size": [3, 3, 3], "kernel_initializer": {"class_name": "VarianceScaling", "config": {"mode": "fan_avg", "seed": null, "distribution": "uniform", "scale": 1.0}}, "dtype": "float32", "name": "conv5a", "filters": 512, "activation": "relu", "kernel_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "data_format": "channels_last", "activity_regularizer": null, "kernel_regularizer": null, "bias_constraint": null, "trainable": true, "use_bias": true, "strides": [1, 1, 1]}}, {"name": "conv5b", "inbound_nodes": [[["conv5a", 0, 0, {}]]], "class_name": "Conv3D", "config": {"bias_regularizer": null, "padding": "same", "dilation_rate": [1, 1, 1], "kernel_size": [3, 3, 3], "kernel_initializer": {"class_name": "VarianceScaling", "config": {"mode": "fan_avg", "seed": null, "distribution": "uniform", "scale": 1.0}}, "dtype": "float32", "name": "conv5b", "filters": 512, "activation": "relu", "kernel_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "data_format": "channels_last", "activity_regularizer": null, "kernel_regularizer": null, "bias_constraint": null, "trainable": true, "use_bias": true, "strides": [1, 1, 1]}}, {"name": "zero_padding3d_1", "inbound_nodes": [[["conv5b", 0, 0, {}]]], "class_name": "ZeroPadding3D", "config": {"trainable": true, "name": "zero_padding3d_1", "padding": [[0, 0], [1, 1], [1, 1]], "data_format": "channels_last", "dtype": "float32"}}, {"name": 
"pool5", "inbound_nodes": [[["zero_padding3d_1", 0, 0, {}]]], "class_name": "MaxPooling3D", "config": {"padding": "valid", "strides": [2, 2, 2], "dtype": "float32", "trainable": true, "name": "pool5", "data_format": "channels_last", "pool_size": [2, 2, 2]}}, {"name": "flatten_1", "inbound_nodes": [[["pool5", 0, 0, {}]]], "class_name": "Flatten", "config": {"trainable": true, "name": "flatten_1", "data_format": "channels_last", "dtype": "float32"}}, {"name": "fc6", "inbound_nodes": [[["flatten_1", 0, 0, {}]]], "class_name": "Dense", "config": {"bias_regularizer": null, "activation": "relu", "kernel_initializer": {"class_name": "VarianceScaling", "config": {"mode": "fan_avg", "seed": null, "distribution": "uniform", "scale": 1.0}}, "dtype": "float32", "name": "fc6", "units": 4096, "kernel_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "activity_regularizer": null, "kernel_regularizer": null, "bias_constraint": null, "trainable": true, "use_bias": true}}], "output_layers": [["fc6", 0, 0]]}} | ||
# -*- coding: utf-8 -*-

import numpy as np
from keras.utils.data_utils import get_file
from scipy.misc import imresize  # removed in SciPy 1.3.0; requires scipy<1.2 (or replace with cv2.resize)

from bigdl.nn.layer import *


C3D_MEAN_PATH = 'https://github.com/adamcasson/c3d/releases/download/v0.1/c3d_mean.npy'


def preprocess_input(video):
    # Sample 16 evenly spaced frames from the clip.
    intervals = np.ceil(np.linspace(0, video.shape[0] - 1, 16)).astype(int)
    frames = video[intervals]

    # Reshape to 128x171
    reshape_frames = np.zeros((frames.shape[0], 128, 171, frames.shape[3]))
    for i, img in enumerate(frames):
        img = imresize(img, (128, 171), 'bicubic')
        reshape_frames[i, :, :, :] = img

    # Subtract the mean cube that ships with the pretrained C3D model.
    mean_path = get_file('c3d_mean.npy',
                         C3D_MEAN_PATH,
                         cache_subdir='models',
                         md5_hash='08a07d9761e76097985124d9e8b2fe34')
    mean = np.load(mean_path)
    reshape_frames -= mean

    # Crop to 112x112
    reshape_frames = reshape_frames[:, 8:120, 30:142, :]
    # Add extra dimension for samples
    reshape_frames = np.expand_dims(reshape_frames, axis=0)

    return reshape_frames


def c3d_feature_extractor():
    # Load the C3D architecture (up to fc6) from the Keras JSON definition via BigDL
    # (architecture only; no hdf5 weights are passed here).
    feature_extractor_model = Model.load_keras(json_path='./c3d.json')
    return feature_extractor_model
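For orientation, a rough usage sketch of the two helpers above. The dummy `video` array is an assumption, and the `.predict(clip)[0]` call simply mirrors how the Spark consumer further down uses the extractor:

import numpy as np

video = np.random.randint(0, 255, size=(40, 240, 320, 3), dtype=np.uint8)  # fake decoded clip

clip = preprocess_input(video)        # -> shape (1, 16, 112, 112, 3)
extractor = c3d_feature_extractor()   # BigDL model built from ./c3d.json
fc6 = extractor.predict(clip)[0]      # 4096-dimensional fc6 feature, as consumed downstream
print(fc6.shape)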
1 | +{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "use_bias": true, "bias_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": {"class_name": "L1L2", "config": {"l1": 0.0, "l2": 0.0010000000474974513}}, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "normal", "seed": null, "mode": "fan_avg", "scale": 1.0}}, "batch_input_shape": [null, 4096], "activity_regularizer": null, "activation": "relu", "kernel_constraint": null, "dtype": "float32", "trainable": true, "bias_regularizer": null, "units": 512}}, {"class_name": "Dropout", "config": {"name": "dropout_1", "dtype": "float32", "trainable": true, "seed": null, "rate": 0.6, "noise_shape": null}}, {"class_name": "Dense", "config": {"name": "dense_2", "use_bias": true, "bias_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": {"class_name": "L1L2", "config": {"l1": 0.0, "l2": 0.0010000000474974513}}, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "normal", "seed": null, "mode": "fan_avg", "scale": 1.0}}, "activity_regularizer": null, "activation": "linear", "kernel_constraint": null, "dtype": "float32", "trainable": true, "bias_regularizer": null, "units": 32}}, {"class_name": "Dropout", "config": {"name": "dropout_2", "dtype": "float32", "trainable": true, "seed": null, "rate": 0.6, "noise_shape": null}}, {"class_name": "Dense", "config": {"name": "dense_3", "use_bias": true, "bias_constraint": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": {"class_name": "L1L2", "config": {"l1": 0.0, "l2": 0.0010000000474974513}}, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "normal", "seed": null, "mode": "fan_avg", "scale": 1.0}}, "activity_regularizer": null, "activation": "sigmoid", "kernel_constraint": null, "dtype": "float32", "trainable": true, "bias_regularizer": null, "units": 1}}]}, "keras_version": "2.3.1", "backend": "tensorflow"} | ||
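spark/classifier.py, the module that provides build_classifier_model for the streaming job below, is not among the files shown here. Purely as an assumption, a loader in the same style as c3d.py could look like the sketch below; it would build only the 4096 -> 512 -> 32 -> 1 scoring head defined in classifier.json, and the trained weights would still have to be loaded separately.

from bigdl.nn.layer import *


def build_classifier_model():
    # Hypothetical sketch: load the classifier head from its Keras JSON definition.
    classifier = Model.load_keras(json_path='./classifier.json')
    return classifier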
import numpy as np


def chunks(l, n):
    # Yield successive n-sized chunks from l.
    for i in range(0, len(l), n):
        yield l[i:i + n]


def interpolate(features, features_per_bag):
    # Resample the clip features, e.g. (172, 4096), into (features_per_bag, 4096), i.e. (32, 4096).
    feature_size = np.array(features).shape[1]
    interpolated_features = np.zeros((features_per_bag, feature_size))
    interpolation_indices = np.round(np.linspace(0, len(features) - 1, num=features_per_bag + 1))
    count = 0
    for index in range(0, len(interpolation_indices) - 1):
        start = int(interpolation_indices[index])
        end = int(interpolation_indices[index + 1])

        assert end >= start

        if start == end:
            temp_vect = features[start, :]
        else:
            temp_vect = np.mean(features[start:end + 1, :], axis=0)

        # Guard against a zero vector before L2-normalising.
        norm = np.linalg.norm(temp_vect)
        if norm == 0:
            print("Error")
        else:
            temp_vect = temp_vect / norm

        interpolated_features[count, :] = temp_vect
        count = count + 1

    return np.array(interpolated_features)


def extrapolate(outputs, num_frames):
    # Spread the per-segment outputs back over num_frames frames.
    extrapolated_outputs = []
    extrapolation_indices = np.round(np.linspace(0, len(outputs) - 1, num=num_frames))
    for index in extrapolation_indices:
        extrapolated_outputs.append(outputs[int(index)])
    return np.array(extrapolated_outputs)
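A small shape check (random data, purely illustrative) of how these two helpers are used around the classifier:

import numpy as np

clip_features = np.random.rand(172, 4096).astype(np.float32)  # one 4096-d C3D feature per clip
bag = interpolate(clip_features, 32)              # -> (32, 4096): 32 temporal segments per video
segment_scores = np.random.rand(32)               # stand-in for the classifier's per-segment scores
frame_scores = extrapolate(segment_scores, 500)   # -> one score per original frame (500 assumed)
print(bag.shape, frame_scores.shape)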
import sys

import numpy as np
import cv2

from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils

import spark.parameters as params
from spark.c3d import *
from spark.classifier import *
from spark.utils.array_util import *


def deserializer(img):
    # Kafka delivers the JPEG bytes published by the producer; wrap them in a uint8 buffer.
    return img[0], np.frombuffer(img[1], dtype=np.uint8)


def decode(img):
    # Decode the JPEG buffer and convert OpenCV's BGR layout to RGB.
    return img[0], cv2.cvtColor(cv2.imdecode(img[1], cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)


def sliding_window(arr, size, stride):
    # Cut the frame list into clips of `size` frames, one clip every `stride` frames.
    num_chunks = int((len(arr) - size) / stride) + 2
    result = []
    for i in range(0, num_chunks * stride, stride):
        if len(arr[i:i + size]) > 0:
            result.append(arr[i:i + size])
    return np.array(result)


def process_batch(rdd):
    # Called on the driver for every micro-batch once the streaming context is running.
    frame_list = rdd.collect()  # rdd -> list of decoded frames
    if not frame_list:
        return

    video_clips = sliding_window(frame_list, params.frame_count, params.frame_count)  # list -> np

    # extract C3D features clip by clip
    rgb_features = []
    for clip in video_clips:
        clip = np.array(clip)
        if len(clip) < params.frame_count:
            continue
        clip = preprocess_input(clip)
        rgb_features.append(feature_extractor.predict(clip)[0])

    if not rgb_features:
        return
    rgb_features = np.array(rgb_features)

    # bag features into a fixed number of temporal segments
    rgb_feature_bag = interpolate(rgb_features, params.features_per_bag)

    # classify using the trained classifier model
    predictions = classifier_model.predict(rgb_feature_bag)
    predictions = np.array(predictions).squeeze()

    # map the per-segment predictions back onto the frames of this batch
    predictions = extrapolate(predictions, len(frame_list))
    print(predictions)


sc = SparkContext(appName="test")
ssc = StreamingContext(sc, 1)
brokers, topic = sys.argv[1:]

kafka_stream = KafkaUtils.createDirectStream(ssc, [topic], {"metadata.broker.list": brokers},
                                             valueDecoder=lambda x: x)
frames = kafka_stream.map(deserializer).map(decode).map(lambda x: x[1])

# build models once on the driver
feature_extractor = c3d_feature_extractor()
classifier_model = build_classifier_model()

# run the feature-extraction + classification pipeline on each micro-batch
frames.foreachRDD(process_batch)
frames.pprint()
ssc.start()
ssc.awaitTermination()
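A quick local check of sliding_window (no Spark needed; the 16-frame clip length is an assumption standing in for params.frame_count):

import numpy as np

frames = [np.zeros((240, 320, 3), dtype=np.uint8) for _ in range(48)]  # 48 fake decoded frames
clips = sliding_window(frames, 16, 16)  # non-overlapping 16-frame clips, as in the job above
print(clips.shape)                      # (3, 16, 240, 320, 3)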