From 7b2c3fdc1f6f2f009701d2f0ae1ee7d88425c947 Mon Sep 17 00:00:00 2001
From: Jamie Hardt
Date: Wed, 5 Nov 2025 21:38:55 -0800
Subject: [PATCH] Removing dead code for ADM

---
 intern/generate_adm.py                    | 258 ++++++++++------------
 intern/object_mix.py                      |  16 +-
 operator_convert_particles_to_speakers.py |  86 --------
 3 files changed, 129 insertions(+), 231 deletions(-)
 delete mode 100644 operator_convert_particles_to_speakers.py

diff --git a/intern/generate_adm.py b/intern/generate_adm.py
index 6a4a14f..73a6418 100644
--- a/intern/generate_adm.py
+++ b/intern/generate_adm.py
@@ -1,32 +1,14 @@
 import bpy
-import lxml
-import uuid
-from fractions import Fraction
-import struct
 from os.path import dirname
-import numpy
-
-from time import strftime
-
 from typing import List
-from ear.fileio.utils import openBw64
-
-from ear.fileio.bw64.chunks import (FormatInfoChunk, ChnaChunk)
-
-from ear.fileio.adm import chna as adm_chna
-from ear.fileio.adm.xml import adm_to_xml
-from ear.fileio.adm.builder import (ADMBuilder)
-from ear.fileio.adm.generate_ids import generate_ids
-
 from .geom_utils import (speaker_active_time_range, speakers_by_min_distance,
                          speakers_by_start_time)
-from .object_mix import (ObjectMix, ObjectMixPool,
-                         object_mixes_from_source_groups)
+from .object_mix import (ObjectMixPool, object_mixes_from_source_groups)
 from .speaker_utils import (all_speakers)
 
@@ -57,7 +39,8 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
     ret_val = [[]]
 
     for spk in by_priority:
-        success = False  # flaggy-flag because I can't do a break->continue from the inner
+        success = False  # flag: we can't break out of the inner loop
+        # and continue the outer one directly
         for elem in ret_val:
            if list_can_accept_speaker(elem, spk):
                 elem.append(spk)
@@ -72,123 +55,123 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
     return ret_val
 
 
-def adm_for_object(scene: bpy.types.Scene, sound_object: ObjectMix, room_size,
-                   adm_builder, object_index):
-    fps = scene.render.fps
-    frame_start = scene.frame_start
-    frame_end = scene.frame_end
-
-    block_formats = sound_object.adm_block_formats(room_size=room_size)
-
-    created = adm_builder.create_item_objects(track_index=object_index,
-                                              name=sound_object.object_name,
-                                              block_formats=block_formats)
-
-    created.audio_object.start = Fraction(frame_start, fps)
-    created.audio_object.duration = Fraction(frame_end - frame_start, fps)
-    created.track_uid.sampleRate = sound_object.sample_rate
-    created.track_uid.bitDepth = sound_object.bits_per_sample
+# def adm_for_object(scene: bpy.types.Scene, sound_object: ObjectMix, room_size,
+#                    adm_builder, object_index):
+#     fps = scene.render.fps
+#     frame_start = scene.frame_start
+#     frame_end = scene.frame_end
+#
+#     # block_formats = sound_object.adm_block_formats(room_size=room_size)
+#
+#     created = adm_builder.create_item_objects(track_index=object_index,
+#                                               name=sound_object.object_name,
+#                                               block_formats=block_formats)
+#
+#     created.audio_object.start = Fraction(frame_start, fps)
+#     created.audio_object.duration = Fraction(frame_end - frame_start, fps)
+#     created.track_uid.sampleRate = sound_object.sample_rate
+#     created.track_uid.bitDepth = sound_object.bits_per_sample
 
 
-def adm_for_scene(scene: bpy.types.Scene, sound_object_mixes: List[ObjectMix],
-                  room_size):
-    adm_builder = ADMBuilder()
+# def adm_for_scene(scene: bpy.types.Scene, sound_object_mixes: List[ObjectMix],
+#                   room_size):
+#     adm_builder = ADMBuilder()
+#
+#     frame_start = scene.frame_start
+#     frame_end = scene.frame_end
+#     fps = scene.render.fps
+#
+#     adm_builder.create_programme(audioProgrammeName=scene.name,
+#                                  start=Fraction(frame_start, fps),
+#                                  end=Fraction(frame_end, fps))
+#
+#     adm_builder.create_content(audioContentName="Objects")
+#
+#     for object_index, sound_object in enumerate(sound_object_mixes):
+#         adm_for_object(scene, sound_object, room_size,
+#                        adm_builder, object_index)
+#
+#     adm = adm_builder.adm
+#
+#     generate_ids(adm)
+#     chna = ChnaChunk()
+#     adm_chna.populate_chna_chunk(chna, adm)
+#
+#     return adm_to_xml(adm), chna
+#
 
-    frame_start = scene.frame_start
-    frame_end = scene.frame_end
-    fps = scene.render.fps
-
-    adm_builder.create_programme(audioProgrammeName=scene.name,
-                                 start=Fraction(frame_start, fps),
-                                 end=Fraction(frame_end, fps))
-
-    adm_builder.create_content(audioContentName="Objects")
-
-    for object_index, sound_object in enumerate(sound_object_mixes):
-        adm_for_object(scene, sound_object, room_size,
-                       adm_builder, object_index)
-
-    adm = adm_builder.adm
-
-    generate_ids(adm)
-    chna = ChnaChunk()
-    adm_chna.populate_chna_chunk(chna, adm)
-
-    return adm_to_xml(adm), chna
 
+# def bext_data(scene, sample_rate, room_size):
+#     description = "SCENE={};ROOM_SIZE={}\n".format(
+#         scene.name, room_size).encode("ascii")
+#     originator_name = "Blender {}".format(
+#         bpy.app.version_string).encode("ascii")
+#     originator_ref = uuid.uuid1().hex.encode("ascii")
+#     date10 = strftime("%Y-%m-%d").encode("ascii")
+#     time8 = strftime("%H:%M:%S").encode("ascii")
+#     timeref = int(float(scene.frame_start) *
+#                   sample_rate / float(scene.render.fps))
+#     version = 0
+#     umid = b"\0" * 64
+#     pad = b"\0" * 190
+#
+#     data = struct.pack("<256s32s32s10s8sQH64s190s", description,
+#                        originator_name, originator_ref, date10, time8, timeref,
+#                        version, umid, pad)
+#
+#     return data
+#
+#
+# def attach_outfile_metadata(out_format, outfile, room_size, scene,
+#                             sound_objects):
+#     adm, chna = adm_for_scene(scene, sound_objects, room_size=room_size)
+#     outfile.axml = lxml.etree.tostring(adm, pretty_print=True)
+#     outfile.chna = chna
+#     outfile.bext = bext_data(scene, out_format.sampleRate, room_size=room_size)
+#
+#
+# def write_outfile_audio_data(outfile, shortest_file, sound_objects):
+#     READ_BLOCK = 1024
+#     cursor = 0
+#
+#     # Not sure if this is necessary but lets do it
+#     for obj in sound_objects:
+#         obj.mixdown_reader.seek(0)
+#
+#     while True:
+#         remainder = shortest_file - cursor
+#         to_read = min(READ_BLOCK, remainder)
+#         if to_read == 0:
+#             break
+#
+#         buffer = numpy.zeros((to_read, len(sound_objects)))
+#         for i, sound_object in enumerate(sound_objects):
+#             buffer[:, i] = sound_object.mixdown_reader.read(to_read)[:, 0]
+#
+#         outfile.write(buffer)
+#         cursor = cursor + to_read
 
 
-def bext_data(scene, sample_rate, room_size):
-    description = "SCENE={};ROOM_SIZE={}\n".format(
-        scene.name, room_size).encode("ascii")
-    originator_name = "Blender {}".format(
-        bpy.app.version_string).encode("ascii")
-    originator_ref = uuid.uuid1().hex.encode("ascii")
-    date10 = strftime("%Y-%m-%d").encode("ascii")
-    time8 = strftime("%H:%M:%S").encode("ascii")
-    timeref = int(float(scene.frame_start) *
-                  sample_rate / float(scene.render.fps))
-    version = 0
-    umid = b"\0" * 64
-    pad = b"\0" * 190
-
-    data = struct.pack("<256s32s32s10s8sQH64s190s", description,
-                       originator_name, originator_ref, date10, time8, timeref,
-                       version, umid, pad)
-
-    return data
-
-
-def attach_outfile_metadata(out_format, outfile, room_size, scene,
-                            sound_objects):
-    adm, chna = adm_for_scene(scene, sound_objects, room_size=room_size)
-    outfile.axml = lxml.etree.tostring(adm, pretty_print=True)
-    outfile.chna = chna
-    outfile.bext = bext_data(scene, out_format.sampleRate, room_size=room_size)
-
-
-def write_outfile_audio_data(outfile, shortest_file, sound_objects):
-    READ_BLOCK = 1024
-    cursor = 0
-
-    # Not sure if this is necessary but lets do it
-    for obj in sound_objects:
-        obj.mixdown_reader.seek(0)
-
-    while True:
-        remainder = shortest_file - cursor
-        to_read = min(READ_BLOCK, remainder)
-        if to_read == 0:
-            break
-
-        buffer = numpy.zeros((to_read, len(sound_objects)))
-        for i, sound_object in enumerate(sound_objects):
-            buffer[:, i] = sound_object.mixdown_reader.read(to_read)[:, 0]
-
-        outfile.write(buffer)
-        cursor = cursor + to_read
-
-
-def write_muxed_wav(mix_pool: ObjectMixPool, scene, out_format, room_size,
-                    outfile, shortest_file):
-    sound_objects = mix_pool.object_mixes
-    attach_outfile_metadata(out_format, outfile,
-                            room_size, scene, sound_objects)
-    write_outfile_audio_data(outfile, shortest_file, sound_objects)
-
-
-def mux_adm_from_object_mix_pool(scene, mix_pool: ObjectMixPool,
-                                 output_filename, room_size=1.):
-    object_count = len(mix_pool.object_mixes)
-    assert object_count > 0
-
-    out_format = FormatInfoChunk(channelCount=object_count,
-                                 sampleRate=scene.render.ffmpeg.audio_mixrate,
-                                 bitsPerSample=24)
-
-    with openBw64(output_filename, 'w', formatInfo=out_format) as outfile:
-        write_muxed_wav(mix_pool, scene, out_format, room_size,
-                        outfile, mix_pool.shortest_file_length)
-
+# def write_muxed_wav(mix_pool: ObjectMixPool, scene, out_format, room_size,
+#                     outfile, shortest_file):
+#     sound_objects = mix_pool.object_mixes
+#     attach_outfile_metadata(out_format, outfile,
+#                             room_size, scene, sound_objects)
+#     write_outfile_audio_data(outfile, shortest_file, sound_objects)
+#
+#
+# def mux_adm_from_object_mix_pool(scene, mix_pool: ObjectMixPool,
+#                                  output_filename, room_size=1.):
+#     object_count = len(mix_pool.object_mixes)
+#     assert object_count > 0
+#
+#     out_format = FormatInfoChunk(channelCount=object_count,
+#                                  sampleRate=scene.render.ffmpeg.audio_mixrate,
+#                                  bitsPerSample=24)
+#
+#     with openBw64(output_filename, 'w', formatInfo=out_format) as outfile:
+#         write_muxed_wav(mix_pool, scene, out_format, room_size,
+#                         outfile, mix_pool.shortest_file_length)
+#
 
 def print_partition_results(object_groups, sound_sources, too_far_speakers):
     print("Will create {} objects for {} sources, ignoring {} sources".format(
@@ -225,7 +208,8 @@ def partition_sounds_to_objects(scene, max_objects) -> \
     return object_groups, too_far_speakers
 
 
-def generate_adm(context: bpy.types.Context, filepath: str, room_size: float,
+def generate_adm(context: bpy.types.Context, filepath: str,
+                 room_size: float,
                  max_objects: int) -> set[str]:
 
     scene = context.scene
@@ -239,10 +223,8 @@ def generate_adm(context: bpy.types.Context, filepath: str, room_size: float,
                                               base_dir=dirname(filepath))
 
     with ObjectMixPool(object_mixes=mix_groups) as pool:
-        mux_adm_from_object_mix_pool(scene, mix_pool=pool,
-                                     output_filename=filepath,
-                                     room_size=room_size)
-        print("Finished muxing ADM")
+        # here is where we place the mix objects into the session
+        pass
 
-    print("generate_adm exiting")
+    # print("generate_adm exiting")
     return {'FINISHED'}
diff --git a/intern/object_mix.py b/intern/object_mix.py
index 1eb6699..ef11720 100644
--- a/intern/object_mix.py
+++ b/intern/object_mix.py
@@ -9,7 +9,7 @@ from .speaker_utils import solo_speakers, unmute_all_speakers
 
 
 @contextmanager
-def adm_object_rendering_context(scene: bpy.types.Scene):
+def object_rendering_context(scene: bpy.types.Scene):
     old_ff = scene.render.image_settings.file_format
     old_codec = scene.render.ffmpeg.audio_codec
     old_chans = scene.render.ffmpeg.audio_channels
@@ -34,8 +34,8 @@ class ObjectMix:
         self.intermediate_filename = None
         self.base_dir = base_dir
         self.scene = scene
-        self._mixdown_file_handle = None
-        self._mixdown_reader = None
+        # self._mixdown_file_handle = None
+        # self._mixdown_reader = None
 
     @property
     def frame_start(self):
@@ -73,7 +73,7 @@ class ObjectMix:
         return self.sources[0].name
 
     def mixdown(self):
-        with adm_object_rendering_context(self.scene) as scene:
+        with object_rendering_context(self.scene) as scene:
             solo_speakers(scene, self.sources)
 
             scene_name = bpy.path.clean_name(scene.name)
@@ -83,7 +83,7 @@ class ObjectMix:
                 self.base_dir, "%s_%s.wav" % (scene_name, speaker_name))
 
             bpy.ops.sound.mixdown(filepath=self.intermediate_filename,
-                                  container='WAV', codec='PCM', format='S24')
+                                  container='WAV', codec='PCM', format='F32')
 
             print("Created mixdown named {}"
                   .format(self.intermediate_filename))
@@ -121,8 +121,10 @@ class ObjectMixPool:
         return min(lengths)
 
 
-def object_mixes_from_source_groups(groups: List[List[bpy.types.Object]],
-                                    scene: bpy.types.Scene, base_dir: str):
+def object_mixes_from_source_groups(
+        groups: List[List[bpy.types.Object]],
+        scene: bpy.types.Scene, base_dir: str) -> list[ObjectMix]:
+
     mixes = []
     for group in groups:
         mixes.append(ObjectMix(sources=group, scene=scene, base_dir=base_dir))
diff --git a/operator_convert_particles_to_speakers.py b/operator_convert_particles_to_speakers.py
deleted file mode 100644
index a1c140c..0000000
--- a/operator_convert_particles_to_speakers.py
+++ /dev/null
@@ -1,86 +0,0 @@
-## This is copied from
-## https://blender.stackexchange.com/questions/4956/convert-particle-system-to-animated-meshes?answertab=active#tab-top
-#
-# And needs to be adapted
-
-import bpy
-
-# Set these to False if you don't want to key that property.
-KEYFRAME_LOCATION = True
-KEYFRAME_ROTATION = True
-KEYFRAME_SCALE = True
-KEYFRAME_VISIBILITY = True  # Viewport and render visibility.
-
-def create_objects_for_particles(ps, obj):
-    # Duplicate the given object for every particle and return the duplicates.
-    # Use instances instead of full copies.
-    obj_list = []
-    mesh = obj.data
-    particles_coll = bpy.data.collections.new(name="particles")
-    bpy.context.scene.collection.children.link(particles_coll)
-
-    for i, _ in enumerate(ps.particles):
-        dupli = bpy.data.objects.new(
-            name="particle.{:03d}".format(i),
-            object_data=mesh)
-        particles_coll.objects.link(dupli)
-        obj_list.append(dupli)
-    return obj_list
-
-def match_and_keyframe_objects(ps, obj_list, start_frame, end_frame):
-    # Match and keyframe the objects to the particles for every frame in the
-    # given range.
-    for frame in range(start_frame, end_frame + 1):
-        print("frame {} processed".format(frame))
-        bpy.context.scene.frame_set(frame)
-        for p, obj in zip(ps.particles, obj_list):
-            match_object_to_particle(p, obj)
-            keyframe_obj(obj)
-
-def match_object_to_particle(p, obj):
-    # Match the location, rotation, scale and visibility of the object to
-    # the particle.
-    loc = p.location
-    rot = p.rotation
-    size = p.size
-    if p.alive_state == 'ALIVE':
-        vis = True
-    else:
-        vis = False
-
-    obj.location = loc
-    # Set rotation mode to quaternion to match particle rotation.
-    obj.rotation_mode = 'QUATERNION'
-    obj.rotation_quaternion = rot
-    obj.scale = (size, size, size)
-    obj.hide_viewport = not(vis)  # <<<-- this was called "hide" in <= 2.79
-    obj.hide_render = not(vis)
-
-def keyframe_obj(obj):
-    # Keyframe location, rotation, scale and visibility if specified.
-    if KEYFRAME_LOCATION:
-        obj.keyframe_insert("location")
-    if KEYFRAME_ROTATION:
-        obj.keyframe_insert("rotation_quaternion")
-    if KEYFRAME_SCALE:
-        obj.keyframe_insert("scale")
-    if KEYFRAME_VISIBILITY:
-        obj.keyframe_insert("hide_viewport")  # <<<-- this was called "hide" in <= 2.79
-        obj.keyframe_insert("hide_render")
-
-def main():
-    #in 2.8 you need to evaluate the Dependency graph in order to get data from animation, modifiers, etc
-    depsgraph = bpy.context.evaluated_depsgraph_get()
-
-    # Assume only 2 objects are selected.
-    # The active object should be the one with the particle system.
-    ps_obj = bpy.context.object
-    ps_obj_evaluated = depsgraph.objects[ ps_obj.name ]
-    obj = [obj for obj in bpy.context.selected_objects if obj != ps_obj][0]
-    ps = ps_obj_evaluated.particle_systems[0]  # Assume only 1 particle system is present.
-    start_frame = bpy.context.scene.frame_start
-    end_frame = bpy.context.scene.frame_end
-    obj_list = create_objects_for_particles(ps, obj)
-    match_and_keyframe_objects(ps, obj_list, start_frame, end_frame)
-
-if __name__ == '__main__':
-    main()