import sys
from math import sqrt

import bpy

import numpy
from numpy.linalg import norm

from mathutils import Vector, Quaternion


class FrameInterval:
    def __init__(self, start_frame, end_frame):
        self.start_frame = int(start_frame)
        self.end_frame = int(end_frame)

    def overlaps(self, other: 'FrameInterval') -> bool:
        return (self.start_frame <= other.start_frame <= self.end_frame or
                other.start_frame <= self.start_frame <= other.end_frame)


def compute_relative_vector(camera: bpy.types.Object, target: bpy.types.Object):
    """
    Return a vector from `camera` to `target` in the camera's coordinate space.

    In the returned space the camera's lens is normal to the ZX plane
    (pointing along +Y).
    """
    cam_loc, cam_rot, _ = camera.matrix_world.decompose()
    target_loc, _, _ = target.matrix_world.decompose()
    relative_vector = target_loc - cam_loc

    # Transform the world-space offset into the camera's local frame by
    # applying the inverse (transposed) camera rotation.
    rotation = cam_rot.to_matrix().transposed()
    relative_vector.rotate(rotation)

    # The camera looks down its local -Z axis; rotate 90 degrees about X so
    # the result is expressed with +Y pointing down the barrel and +Z up,
    # rather than normal to the horizon.
    camera_correction = Quaternion((sqrt(2.) / 2., sqrt(2.) / 2., 0., 0.))
    relative_vector.rotate(camera_correction)

    return relative_vector
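
# Usage sketch (not part of the original module; assumes the active scene has
# a camera and an object named "Speaker"):
#
#     scene = bpy.context.scene
#     rel = compute_relative_vector(scene.camera, scene.objects["Speaker"])
#     # rel.y > 0 when the object is in front of the lens, rel.x > 0 when it
#     # is to the camera's right, rel.z > 0 when it is above the view axis.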


def room_norm_vector(vec, room_size=1.) -> Vector:
    """
    The Room is tearing me apart, Lisa.

    The room is a cube with the camera at its center. We use a Chebyshev
    normalization to convert a vector in world or camera space into a vector
    that represents the projection of that vector onto the room's walls. The
    Room Vector is directly the X, Y and Z coordinates of the corresponding
    ADM Block Format source object position.

    The Pro Tools/Dolby Atmos workflow I am targeting uses "Room Centric"
    panner coordinates ("cartesian allocentric coordinates" in ADM speak) and
    this process seems to yield good results.

    I also experimented with using normalized camera frame coordinates from
    the bpy_extras.object_utils.world_to_camera_view method and this gives
    very good results as long as the object is on-screen; coordinates for
    objects off the screen are unusable.

    In the future it would be worth exploring whether there's a way to produce
    ADM coordinates that are "Screen-accurate" while the object is on-screen
    but that still give sensible results when the object is off-screen.
    """
    chebyshev = norm(vec, ord=numpy.inf)
    if chebyshev < room_size:
        return vec / room_size
    else:
        return vec / chebyshev
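
# Worked example (illustrative values only): with room_size=1, a camera-space
# vector Vector((2.0, 4.0, 1.0)) has a Chebyshev norm of 4.0, so it projects
# onto the room's walls as Vector((0.5, 1.0, 0.25)); a vector already inside
# the room, e.g. Vector((0.25, -0.5, 0.1)), takes the first branch and is
# only scaled by room_size.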


def closest_approach_to_camera(scene, speaker_object) -> tuple[float, int]:
    """
    The distance and frame number of `speaker_object`'s closest approach to
    the scene's camera.

    (Works for any object, not just speakers.)
    """
    min_dist = sys.float_info.max
    at_time = scene.frame_start
    for frame in range(scene.frame_start, scene.frame_end + 1):
        scene.frame_set(frame)
        rel = (speaker_object.matrix_world.to_translation() -
               scene.camera.matrix_world.to_translation())
        dist = float(norm(rel))

        if dist < min_dist:
            min_dist = dist
            at_time = frame

    return min_dist, at_time
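
# Performance note (added observation): each call steps the scene through
# every frame with scene.frame_set(), which re-evaluates the whole scene's
# animation, so sorting many speakers by this value (see
# speakers_by_min_distance below) costs roughly frames x speakers updates.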


def speaker_active_time_range(speaker) -> FrameInterval:
    """
    The overall time range covered by the NLA strips on this speaker's
    tracks, from the earliest strip start to the latest strip end.
    """
    start, end = 0xffffffff, -0xffffffff
    for track in speaker.animation_data.nla_tracks:
        for strip in track.strips:
            if strip.frame_start < start:
                start = strip.frame_start

            if strip.frame_end > end:
                end = strip.frame_end

    return FrameInterval(start_frame=start, end_frame=end)


def speakers_by_min_distance(scene, speakers):
    """
    Sort a list of speaker objects in ascending order of their closest
    approach to the camera, so the objects that come closest sort first.
    """
    def min_distance(speaker):
        return closest_approach_to_camera(scene, speaker)[0]

    return sorted(speakers, key=min_distance)


def speakers_by_start_time(speaker_objs):
    return sorted(speaker_objs,
                  key=(lambda spk: speaker_active_time_range(spk).start_frame))
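

# A minimal end-to-end sketch, not part of the original module: rank the
# speakers in the active scene by how close they come to the camera, then
# print a room-normalized position for each at its closest frame. The
# 'SPEAKER' type filter and the printing are assumptions about how this
# module is meant to be driven.
def _demo():
    scene = bpy.context.scene
    speakers = [obj for obj in scene.objects if obj.type == 'SPEAKER']

    for spk in speakers_by_min_distance(scene, speakers):
        dist, frame = closest_approach_to_camera(scene, spk)
        scene.frame_set(frame)
        room = room_norm_vector(compute_relative_vector(scene.camera, spk))
        active = speaker_active_time_range(spk)
        print(spk.name, frame, round(dist, 3), tuple(room),
              (active.start_frame, active.end_frame))


if __name__ == "__main__":
    _demo()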