render_depth.py
# Author: Wentao Yuan (wyuan1@cs.cmu.edu) 05/31/2018
import bpy
import mathutils
import numpy as np
import os
import sys
import time
# Usage: blender -b -P render_depth.py [ShapeNet directory] [model list] [output directory] [num scans per model]
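# Example invocation (the paths below are illustrative placeholders, not part of the repo):
#   blender -b -P render_depth.py /data/ShapeNetCore.v1 model_list.txt /data/depth_renders 8
# Note: the script uses the pre-2.80 Blender Python API (e.g. object.select = True,
# bpy.ops.import_scene.obj, the axis= argument of bpy.ops.transform.rotate), so it should
# be run with a Blender 2.7x build; later versions changed these calls.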


def random_pose():
    angle_x = np.random.uniform() * 2 * np.pi
    angle_y = np.random.uniform() * 2 * np.pi
    angle_z = np.random.uniform() * 2 * np.pi
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(angle_x), -np.sin(angle_x)],
                   [0, np.sin(angle_x), np.cos(angle_x)]])
    Ry = np.array([[np.cos(angle_y), 0, np.sin(angle_y)],
                   [0, 1, 0],
                   [-np.sin(angle_y), 0, np.cos(angle_y)]])
    Rz = np.array([[np.cos(angle_z), -np.sin(angle_z), 0],
                   [np.sin(angle_z), np.cos(angle_z), 0],
                   [0, 0, 1]])
    R = np.dot(Rz, np.dot(Ry, Rx))
    # Place the camera 1 unit from the origin along its local +Z axis (R[:, 2]);
    # a Blender camera looks down its local -Z axis, so it points at the origin.
    t = np.expand_dims(R[:, 2], 1)
    pose = np.concatenate([np.concatenate([R, t], 1), [[0, 0, 0, 1]]], 0)
    return pose


def setup_blender(width, height, focal_length):
    # camera
    camera = bpy.data.objects['Camera']
    camera.data.angle = np.arctan(width / 2 / focal_length) * 2

    # render layer
    scene = bpy.context.scene
    scene.render.filepath = 'buffer'
    scene.render.image_settings.color_depth = '16'
    scene.render.resolution_percentage = 100
    scene.render.resolution_x = width
    scene.render.resolution_y = height

    # compositor nodes
    scene.use_nodes = True
    tree = scene.node_tree
    rl = tree.nodes.new('CompositorNodeRLayers')
    output = tree.nodes.new('CompositorNodeOutputFile')
    output.base_path = ''
    output.format.file_format = 'OPEN_EXR'
    tree.links.new(rl.outputs['Depth'], output.inputs[0])

    # remove default cube
    bpy.data.objects['Cube'].select = True
    bpy.ops.object.delete()

    return scene, camera, output


if __name__ == '__main__':
    model_dir = sys.argv[-4]
    list_path = sys.argv[-3]
    output_dir = sys.argv[-2]
    num_scans = int(sys.argv[-1])

    width = 160
    height = 120
    focal = 100
    scene, camera, output = setup_blender(width, height, focal)
    intrinsics = np.array([[focal, 0, width / 2], [0, focal, height / 2], [0, 0, 1]])

    with open(list_path) as file:
        model_list = [line.strip() for line in file]
    open('blender.log', 'w+').close()
    os.system('rm -rf %s' % output_dir)
    os.makedirs(output_dir)
    np.savetxt(os.path.join(output_dir, 'intrinsics.txt'), intrinsics, '%f')

    for model_id in model_list:
        start = time.time()
        exr_dir = os.path.join(output_dir, 'exr', model_id)
        pose_dir = os.path.join(output_dir, 'pose', model_id)
        os.makedirs(exr_dir)
        os.makedirs(pose_dir)

        # Redirect Blender's console output to the log file
        old_os_out = os.dup(1)
        os.close(1)
        os.open('blender.log', os.O_WRONLY)

        # Import mesh model
        model_path = os.path.join(model_dir, model_id, 'model.obj')
        bpy.ops.import_scene.obj(filepath=model_path)

        # Rotate model by 90 degrees around x-axis (z-up => y-up) to match ShapeNet's coordinates
        bpy.ops.transform.rotate(value=-np.pi / 2, axis=(1, 0, 0))

        # Render one depth map per random camera pose
        for i in range(num_scans):
            scene.frame_set(i)
            pose = random_pose()
            camera.matrix_world = mathutils.Matrix(pose)
            output.file_slots[0].path = os.path.join(exr_dir, '#.exr')
            bpy.ops.render.render(write_still=True)
            np.savetxt(os.path.join(pose_dir, '%d.txt' % i), pose, '%f')

        # Clean up the imported meshes and materials before the next model
        bpy.ops.object.delete()
        for m in bpy.data.meshes:
            bpy.data.meshes.remove(m)
        for m in bpy.data.materials:
            m.user_clear()
            bpy.data.materials.remove(m)

        # Restore stdout and report elapsed time
        os.close(1)
        os.dup(old_os_out)
        os.close(old_os_out)
        print('%s done, time=%.4f sec' % (model_id, time.time() - start))
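
# For reference, the directory layout produced by a run (as written by the code above):
#   <output_dir>/intrinsics.txt            3x3 camera intrinsics shared by all renders
#   <output_dir>/exr/<model_id>/<i>.exr    depth map for scan i (OpenEXR, depth pass)
#   <output_dir>/pose/<model_id>/<i>.txt   4x4 camera-to-world pose used for scan i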