Cleanup and modernization

This commit is contained in:
2025-11-05 20:52:22 -08:00
parent 5e4fae092a
commit a85d796f07
2 changed files with 45 additions and 28 deletions

View File

@@ -19,7 +19,8 @@ class FrameInterval:
other.start_frame <= self.start_frame <= other.end_frame
def compute_relative_vector(camera: bpy.types.Camera, target: bpy.types.Object):
def compute_relative_vector(camera: bpy.types.Object,
target: bpy.types.Object):
"""
Return a vector from `camera` to `target` in the camera's coordinate space.
@@ -44,22 +45,24 @@ def room_norm_vector(vec, room_size=1.) -> Vector:
"""
The Room is tearing me apart, Lisa.
The room is a cube with the camera at its center. We use a Chebyshev normalization
to convert a vector in world or camera space into a vector that represents the projection
of that vector onto the room's walls. The Room Vector directly gives the X, Y and Z
coordinates of the corresponding ADM Block Format source object position.
The room is a cube with the camera at its center. We use a Chebyshev
normalization to convert a vector in world or camera space into a vector
that represents the projection of that vector onto the room's walls. The
Room Vector directly gives the X, Y and Z coordinates of the corresponding
ADM Block Format source object position.
The Pro Tools/Dolby Atmos workflow I am targeting uses "Room Centric" panner coordinates
("cartesian allocentric coordinates" in ADM speak) and this process seems to yield good
results.
The Pro Tools/Dolby Atmos workflow I am targeting uses "Room Centric"
panner coordinates ("cartesian allocentric coordinates" in ADM speak) and
this process seems to yield good results.
I also experimented with using normalized camera frame coordinates from the
bpy_extras.object_utils.world_to_camera_view method and this gives very good results as
long as the object is on-screen; coordinates for objects off the screen are unusable.
I also experimented with using normalized camera frame coordinates from the
bpy_extras.object_utils.world_to_camera_view method and this gives very
good results as long as the object is on-screen; coordinates for objects
off the screen are unusable.
In the future it would be worth exploring whether there's a way to produce ADM
coordinates that are "Screen-accurate" while the object is on-screen, but still
give sensible results when the object is off-screen as well.
In the future it would be worth exploring whether there's a way to produce
ADM coordinates that are "Screen-accurate" while the object is on-screen,
but still give sensible results when the object is off-screen as well.
"""
chebyshev = norm(vec, ord=numpy.inf)
if chebyshev < room_size:
@@ -70,8 +73,8 @@ def room_norm_vector(vec, room_size=1.) -> Vector:
def closest_approach_to_camera(scene, speaker_object) -> tuple[float, int]:
"""
The distance and frame number of `speaker_object`'s closest point to
the scene's camera.
The distance and frame number of `speaker_object`'s closest point to the
scene's camera.
(Works for any object, not just speakers.)
"""
@@ -118,4 +121,5 @@ def speakers_by_min_distance(scene, speakers):
def speakers_by_start_time(speaker_objs):
return sorted(speaker_objs, key=(lambda spk: speaker_active_time_range(spk).start_frame))
return sorted(speaker_objs,
key=(lambda spk: speaker_active_time_range(spk).start_frame))