This is how to do it using the Python API, see this answer for a reference:
- add a Viewer node and connect the Z output of the Render Layers node to the Viewer node
- get the pixel data of the viewer
- perform the depth "clipping" in Python
- copy the result back to a new image created with bpy.ops.image.new
- save the image in OpenEXR format
Script:
import bpy
import numpy as np
# https://blender.stackexchange.com/questions/35191/how-to-get-color-and-z-depth-from-viewer-node/71264#71264
# Enable compositing so the render result can be tapped through the node tree.
bpy.context.scene.render.use_compositing = True
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links

# Start from an empty node tree.  Iterate over a snapshot (list(...)):
# removing items from a bpy collection while iterating it can skip nodes.
for n in list(tree.nodes):
    tree.nodes.remove(n)

# Render Layers -> Viewer: the Viewer node's backing image ('Viewer Node')
# is the render buffer that is accessible from Python.
rl = tree.nodes.new('CompositorNodeRLayers')
vl = tree.nodes.new('CompositorNodeViewer')
vl.use_alpha = True  # the alpha channel will carry the Z pass
links.new(rl.outputs[0], vl.inputs[0])  # link Render Image to Viewer Image
links.new(rl.outputs[2], vl.inputs[1])  # link Render Z to Viewer Alpha

# Render at full resolution; make sure scene height and width are as intended.
bpy.context.scene.render.resolution_percentage = 100
scene = bpy.context.scene
# With resolution_percentage = 100 the effective render size equals the
# scene resolution; NOTE(review): if the percentage changes, scale these.
width = scene.render.resolution_x
height = scene.render.resolution_y

# Simulated depth-sensor range: values outside [z_near, z_far] are replaced
# by default_value (0 = "no reading").
z_near = 1.0
z_far = 8.0


def clip_depth(z, z_near, z_far, default_value=0.0):
    """Return *z* with values outside [z_near, z_far] set to *default_value*.

    Vectorized with np.where, so it handles whole arrays at once — no slow
    per-pixel np.vectorize wrapper needed (whose otypes=[np.float] would
    also crash on NumPy >= 1.24, where the np.float alias was removed).
    """
    z = np.asarray(z, dtype=np.float64)
    return np.where((z < z_near) | (z > z_far), default_value, z)


# Save depth as 32-bit float OpenEXR; configure once, not once per frame.
image_settings = scene.render.image_settings
image_settings.file_format = "OPEN_EXR"
image_settings.color_depth = '32'

# Create the output HDR image once and reuse it.  Calling bpy.ops.image.new
# with the same name every frame would accumulate 'img_z.001', 'img_z.002',
# ... while bpy.data.images['img_z'] kept returning the first one.
image_name = 'img_z'
bpy.ops.image.new(name=image_name, width=width, height=height,
                  alpha=True, float=True)
img_z_clip = bpy.data.images[image_name]

step_count = 251
for step in range(1, step_count):
    # Set render frame, then render.
    scene.frame_set(step)
    bpy.ops.render.render()

    # The Viewer Node image is a flat RGBA float buffer; the Z pass sits in
    # the alpha channel thanks to the Render Z -> Viewer Alpha link.
    pixels = np.array(bpy.data.images['Viewer Node'].pixels[:])
    image = pixels.reshape(height, width, 4)
    z = image[:, :, 3]  # depth only

    z_clip = clip_depth(z, z_near, z_far)

    # Replicate depth into all four channels so the EXR carries it as RGBA.
    z_clip_rgba = np.dstack((z_clip, z_clip, z_clip, z_clip))
    img_z_clip.pixels = z_clip_rgba.ravel()
    img_z_clip.save_render('/tmp/blender_z_depth_clip_%04d.exr' % step)