From a85d796f070e3110d6b029318cd65c70c54771b7 Mon Sep 17 00:00:00 2001
From: Jamie Hardt
Date: Wed, 5 Nov 2025 20:52:22 -0800
Subject: [PATCH] Cleanup and modernization

---
 intern/geom_utils.py | 38 +++++++++++++++++++++-----------------
 intern/object_mix.py | 35 ++++++++++++++++++++++++-----------
 2 files changed, 45 insertions(+), 28 deletions(-)

diff --git a/intern/geom_utils.py b/intern/geom_utils.py
index 5b2c9fd..493af91 100644
--- a/intern/geom_utils.py
+++ b/intern/geom_utils.py
@@ -19,7 +19,8 @@ class FrameInterval:
         other.start_frame <= self.start_frame <= other.end_frame
 
 
-def compute_relative_vector(camera: bpy.types.Camera, target: bpy.types.Object):
+def compute_relative_vector(camera: bpy.types.Object,
+                            target: bpy.types.Object):
     """
     Return a vector from `camera` to `target` in the camera's coordinate
     space.
@@ -44,22 +45,24 @@ def room_norm_vector(vec, room_size=1.) -> Vector:
     """
     The Room is tearing me apart, Lisa.
 
-    The room is a cube with the camera at its center. We use a chebyshev normalization
-    to convert a vector in world or camera space into a vector the represents the projection
-    of that vector onto the room's walls. The Room Vector is the immediate the X, Y and Z
-    coordinate of the corresponding ADM Block Format source object position.
+    The room is a cube with the camera at its center. We use a Chebyshev
+    normalization to convert a vector in world or camera space into a vector
+    that represents the projection of that vector onto the room's walls. The
+    Room Vector directly supplies the X, Y and Z coordinates of the
+    corresponding ADM Block Format source object position.
 
-    The Pro Tools/Dolby Atmos workflow I am targeting uses "Room Centric" panner coordinates
-    ("cartesian allocentric coordinates" in ADM speak) and this process seems to yield good
-    results.
+    The Pro Tools/Dolby Atmos workflow I am targeting uses "Room Centric"
+    panner coordinates ("cartesian allocentric coordinates" in ADM speak) and
+    this process seems to yield good results.
 
-    I also experimented with using normalized camera frame coordinates from the
-    bpy_extras.object_utils.world_to_camera_view method and this gives very good results as
-    long as the object is on-screen; coordinates for objects off the screen are unusable.
+    I also experimented with using normalized camera frame coordinates from the
+    bpy_extras.object_utils.world_to_camera_view method and this gives very
+    good results as long as the object is on-screen; coordinates for objects
+    off the screen are unusable.
 
-    In the future it would be worth exploring wether there's a way to produce ADM
-    coordinates that are "Screen-accurate" while the object is on-screen, but still gives
-    sensible results when the object is off-screen as well.
+    In the future it would be worth exploring whether there's a way to produce
+    ADM coordinates that are "Screen-accurate" while the object is on-screen,
+    but still give sensible results when the object is off-screen as well.
     """
     chebyshev = norm(vec, ord=numpy.inf)
     if chebyshev < room_size:
@@ -70,8 +73,8 @@
 
 def closest_approach_to_camera(scene, speaker_object) -> tuple[float, int]:
     """
-    The distance and frame number of `speaker_object`s closest point to
-    the scene's camera.
+    The distance and frame number of `speaker_object`'s closest point to the
+    scene's camera. (Works for any object, not just speakers.)
""" @@ -118,4 +121,5 @@ def speakers_by_min_distance(scene, speakers): def speakers_by_start_time(speaker_objs): - return sorted(speaker_objs, key=(lambda spk: speaker_active_time_range(spk).start_frame)) + return sorted(speaker_objs, + key=(lambda spk: speaker_active_time_range(spk).start_frame)) diff --git a/intern/object_mix.py b/intern/object_mix.py index 643b822..5b6e4c4 100644 --- a/intern/object_mix.py +++ b/intern/object_mix.py @@ -4,10 +4,12 @@ from contextlib import contextmanager from fractions import Fraction from typing import List -from ear.fileio.adm.elements import ObjectCartesianPosition, JumpPosition, AudioBlockFormatObjects +from ear.fileio.adm.elements import (ObjectCartesianPosition, JumpPosition, + AudioBlockFormatObjects) from ear.fileio.bw64 import Bw64Reader -from .geom_utils import speaker_active_time_range, compute_relative_vector, room_norm_vector +from .geom_utils import (speaker_active_time_range, compute_relative_vector, + room_norm_vector) from .speaker_utils import solo_speakers, unmute_all_speakers @@ -89,12 +91,14 @@ class ObjectMix: scene_name = bpy.path.clean_name(scene.name) speaker_name = bpy.path.clean_name(self.object_name) - self.intermediate_filename = os.path.join(self.base_dir, "%s_%s.wav" % (scene_name, speaker_name)) + self.intermediate_filename = os.path.join( + self.base_dir, "%s_%s.wav" % (scene_name, speaker_name)) bpy.ops.sound.mixdown(filepath=self.intermediate_filename, container='WAV', codec='PCM', format='S24') - print("Created mixdown named {}".format(self.intermediate_filename)) + print("Created mixdown named {}" + .format(self.intermediate_filename)) unmute_all_speakers(scene) @@ -104,16 +108,24 @@ class ObjectMix: for speaker_obj in self.sources: speaker_interval = speaker_active_time_range(speaker_obj) - for frame in range(speaker_interval.start_frame, speaker_interval.end_frame + 1): + for frame in range(speaker_interval.start_frame, + speaker_interval.end_frame + 1): + + assert self.scene.camera self.scene.frame_set(frame) - relative_vector = compute_relative_vector(camera=self.scene.camera, target=speaker_obj) + relative_vector = compute_relative_vector( + camera=self.scene.camera, + target=speaker_obj) - norm_vec = room_norm_vector(relative_vector, room_size=room_size) + norm_vec = room_norm_vector( + relative_vector, room_size=room_size) - pos = ObjectCartesianPosition(X=norm_vec.x, Y=norm_vec.y, Z=norm_vec.z) + pos = ObjectCartesianPosition(X=norm_vec.x, Y=norm_vec.y, + Z=norm_vec.z) if len(block_formats) == 0 or pos != block_formats[-1].position: - jp = JumpPosition(flag=True, interpolationLength=Fraction(1, fps * 2)) + jp = JumpPosition( + flag=True, interpolationLength=Fraction(1, fps * 2)) block = AudioBlockFormatObjects(position=pos, rtime=Fraction(frame, fps), duration=Fraction(1, fps), @@ -122,7 +134,8 @@ class ObjectMix: block_formats.append(block) else: - block_formats[-1].duration = block_formats[-1].duration + Fraction(1, fps) + block_formats[-1].duration = block_formats[-1].duration + \ + Fraction(1, fps) return block_formats @@ -157,7 +170,7 @@ class ObjectMixPool: return min(lengths) -def object_mixes_from_source_groups(groups: List[List[bpy.types.Object]], +def object_mixes_from_source_groups(groups: List[List[bpy.types.Object]], scene: bpy.types.Scene, base_dir: str): mixes = [] for group in groups: