diff --git a/intern/add_sound_to_meshes.py b/intern/add_sound_to_meshes.py
index b14192f..897ce69 100644
--- a/intern/add_sound_to_meshes.py
+++ b/intern/add_sound_to_meshes.py
@@ -27,16 +27,20 @@ class SpatialEnvelope:
     exits_range: int
 
 
-def sound_camera_spatial_envelope(scene: bpy.types.Scene, speaker_obj, considered_range: float) -> SpatialEnvelope:
+def sound_camera_spatial_envelope(scene: bpy.types.Scene, speaker_obj,
+                                  considered_range: float) -> SpatialEnvelope:
     min_dist = sys.float_info.max
     min_dist_frame = scene.frame_start
     enters_range_frame = None
     exits_range_frame = None
 
+    assert scene.camera
+
     in_range = False
     for frame in range(scene.frame_start, scene.frame_end + 1):
         scene.frame_set(frame)
-        rel = speaker_obj.matrix_world.to_translation() - scene.camera.matrix_world.to_translation()
+        rel = speaker_obj.matrix_world.to_translation() \
+            - scene.camera.matrix_world.to_translation()
         dist = norm(rel)
 
         if dist < considered_range and not in_range:
@@ -44,7 +48,7 @@ def sound_camera_spatial_envelope(scene: bpy.types.Scene, speaker_obj, considere
             in_range = True
 
         if dist < min_dist:
-            min_dist = dist
+            min_dist = float(dist)
             min_dist_frame = frame
 
         if dist > considered_range and in_range:
@@ -52,6 +56,9 @@ def sound_camera_spatial_envelope(scene: bpy.types.Scene, speaker_obj, considere
             in_range = False
             break
 
+    assert enters_range_frame
+    assert exits_range_frame
+
     return SpatialEnvelope(considered_range=considered_range,
                            enters_range=enters_range_frame,
                            exits_range=exits_range_frame,
@@ -64,7 +71,8 @@ def closest_approach_to_camera(scene, speaker_object):
     at_time = scene.frame_start
     for frame in range(scene.frame_start, scene.frame_end + 1):
         scene.frame_set(frame)
-        rel = speaker_object.matrix_world.to_translation() - scene.camera.matrix_world.to_translation()
+        rel = speaker_object.matrix_world.to_translation(
+        ) - scene.camera.matrix_world.to_translation()
         dist = norm(rel)
 
         if dist < max_dist:
@@ -74,7 +82,7 @@ def closest_approach_to_camera(scene, speaker_object):
     return (max_dist, at_time)
 
 
-def track_speaker_to_camera(speaker, camera):
+def track_speaker_to_camera(speaker):
     camera_lock = speaker.constraints.new('TRACK_TO')
     camera_lock.target = bpy.context.scene.camera
     camera_lock.use_target_z = True
@@ -89,7 +97,8 @@ def spot_audio(context, speaker, trigger_mode, sync_peak, sound_peak, sound_leng
         audio_scene_in = envelope.closest_range
 
     elif trigger_mode == TriggerMode.RANDOM:
-        audio_scene_in = floor(uniform(context.scene.frame_start, context.scene.frame_end))
+        audio_scene_in = floor(
+            uniform(context.scene.frame_start, context.scene.frame_end))
     elif trigger_mode == TriggerMode.RANDOM_GAUSSIAN:
         mean = (context.scene.frame_end - context.scene.frame_start) / 2
         audio_scene_in = floor(gauss(mean, gaussian_stddev))
@@ -127,11 +136,6 @@ def constrain_speaker_to_mesh(speaker_obj, mesh):
     location_loc.target = mesh
     location_loc.target = mesh
 
-
-def apply_gain_envelope(speaker_obj, envelope):
-    pass
-
-
 def add_speakers_to_meshes(meshes, context, sound=None,
                            sound_name_prefix=None,
                            sync_peak=False,
@@ -146,7 +150,8 @@ def add_speakers_to_meshes(meshes, context, sound=None,
            print("object is not mesh")
            continue

-        envelope = sound_camera_spatial_envelope(context.scene, mesh, considered_range=5.)
+        envelope = sound_camera_spatial_envelope(
+            context.scene, mesh, considered_range=5.)
        speaker_obj = next((spk for spk in context.scene.objects if spk.type == 'SPEAKER'
                            and spk.constraints['Copy Location'].target == mesh), None)
@@ -156,7 +161,7 @@ def add_speakers_to_meshes(meshes, context, sound=None,
             speaker_obj = context.selected_objects[0]
 
         constrain_speaker_to_mesh(speaker_obj, mesh)
-        track_speaker_to_camera(speaker_obj, context.scene.camera)
+        track_speaker_to_camera(speaker_obj)
 
         if sound_name_prefix is not None:
             sound = sound_bank.random_sound()
diff --git a/intern/generate_adm.py b/intern/generate_adm.py
index 0e577ab..b43a212 100644
--- a/intern/generate_adm.py
+++ b/intern/generate_adm.py
@@ -1,7 +1,5 @@
 import bpy
 
-from contextlib import contextmanager
-
 import lxml
 import uuid
 from fractions import Fraction
@@ -33,6 +31,17 @@ from .speaker_utils import (all_speakers)
 
 
 def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
+    """
+    Accepts a list of speakers and a scene, and returns a list of lists.
+
+    Each child list contains speakers that are guaranteed not to have
+    overlapping sounds, and holds its speaker objects in ascending order
+    by start time.
+
+    Speakers are allocated to lists on the basis of their minimum distance to
+    the camera according to `speakers_by_min_distance`. A closer sound is
+    placed on the earliest list where it does not overlap.
+    """
 
     def list_can_accept_speaker(speaker_list, speaker_to_test):
         test_range = speaker_active_time_range(speaker_to_test)
@@ -62,7 +71,7 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
     return ret_val
 
 
-def adm_for_object(scene, sound_object: ObjectMix, room_size, adm_builder, object_index):
+def adm_for_object(scene: bpy.types.Scene, sound_object: ObjectMix, room_size, adm_builder, object_index):
     fps = scene.render.fps
     frame_start = scene.frame_start
     frame_end = scene.frame_end
@@ -79,7 +88,7 @@ def adm_for_object(scene, sound_object: ObjectMix, room_size, adm_builder, objec
     created.track_uid.bitDepth = sound_object.bits_per_sample
 
 
-def adm_for_scene(scene, sound_objects: List[ObjectMix], room_size):
+def adm_for_scene(scene: bpy.types.Scene, sound_object_mixes: List[ObjectMix], room_size):
     adm_builder = ADMBuilder()
 
     frame_start = scene.frame_start
@@ -92,7 +101,7 @@ def adm_for_scene(scene, sound_objects: List[ObjectMix], room_size):
 
     adm_builder.create_content(audioContentName="Objects")
 
-    for object_index, sound_object in enumerate(sound_objects):
+    for object_index, sound_object in enumerate(sound_object_mixes):
         adm_for_object(scene, sound_object, room_size, adm_builder, object_index)
 
     adm = adm_builder.adm
@@ -178,11 +187,19 @@ def print_partition_results(object_groups, sound_sources, too_far_speakers):
         print(" - %s" % source.name)
 
 
-def partition_sounds_to_objects(scene, max_objects):
+def partition_sounds_to_objects(scene, max_objects) -> \
+        tuple[list[list[bpy.types.Speaker]], list[bpy.types.Speaker]]:
+    """
+    Allocates sounds in the scene into lists of non-overlapping sounds. The
+    second return value is the list of sounds that could not be allocated
+    because the max_objects limit was exceeded.
+
+    Sounds are allocated to lists according to `group_speakers`.
+ """ sound_sources = all_speakers(scene) if len(sound_sources) == 0: - return [] + return [], [] object_groups = group_speakers(sound_sources, scene) too_far_speakers = [] @@ -196,7 +213,8 @@ def partition_sounds_to_objects(scene, max_objects): return object_groups, too_far_speakers -def generate_adm(context: bpy.types.Context, filepath: str, room_size: float, max_objects: int): +def generate_adm(context: bpy.types.Context, filepath: str, room_size: float, + max_objects: int) -> dict: scene = context.scene object_groups, _ = partition_sounds_to_objects(scene, max_objects) diff --git a/intern/geom_utils.py b/intern/geom_utils.py index 7059b27..e35e7d2 100644 --- a/intern/geom_utils.py +++ b/intern/geom_utils.py @@ -105,6 +105,10 @@ def speaker_active_time_range(speaker) -> FrameInterval: def speakers_by_min_distance(scene, speakers): + """ + Sorts a list of speaker objects in ascending order by their closest + approach to the camera. Objects that approach closest are sorted highest. + """ def min_distance(speaker): return closest_approach_to_camera(scene, speaker)[0] diff --git a/intern/speaker_utils.py b/intern/speaker_utils.py index 6f4b1a0..c56a19b 100644 --- a/intern/speaker_utils.py +++ b/intern/speaker_utils.py @@ -1,4 +1,6 @@ -def all_speakers(scene): +import bpy + +def all_speakers(scene: bpy.types.Scene) -> list[bpy.types.Object]: return [obj for obj in scene.objects if obj.type == 'SPEAKER']