Removing dead code for ADM

This commit is contained in:
2025-11-05 21:38:55 -08:00
parent b83476f820
commit 7b2c3fdc1f
3 changed files with 129 additions and 231 deletions

View File

@@ -1,32 +1,14 @@
import bpy import bpy
import lxml
import uuid
from fractions import Fraction
import struct
from os.path import dirname from os.path import dirname
import numpy
from time import strftime
from typing import List from typing import List
from ear.fileio.utils import openBw64
from ear.fileio.bw64.chunks import (FormatInfoChunk, ChnaChunk)
from ear.fileio.adm import chna as adm_chna
from ear.fileio.adm.xml import adm_to_xml
from ear.fileio.adm.builder import (ADMBuilder)
from ear.fileio.adm.generate_ids import generate_ids
from .geom_utils import (speaker_active_time_range, from .geom_utils import (speaker_active_time_range,
speakers_by_min_distance, speakers_by_min_distance,
speakers_by_start_time) speakers_by_start_time)
from .object_mix import (ObjectMix, ObjectMixPool, from .object_mix import (ObjectMixPool, object_mixes_from_source_groups)
object_mixes_from_source_groups)
from .speaker_utils import (all_speakers) from .speaker_utils import (all_speakers)
@@ -57,7 +39,8 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
ret_val = [[]] ret_val = [[]]
for spk in by_priority: for spk in by_priority:
success = False # flaggy-flag because I can't do a break->continue from the inner success = False # flaggy-flag because I can't do a break->continue
# from the inner
for elem in ret_val: for elem in ret_val:
if list_can_accept_speaker(elem, spk): if list_can_accept_speaker(elem, spk):
elem.append(spk) elem.append(spk)
@@ -72,123 +55,123 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
return ret_val return ret_val
def adm_for_object(scene: bpy.types.Scene, sound_object: ObjectMix, room_size, # def adm_for_object(scene: bpy.types.Scene, sound_object: ObjectMix, room_size,
adm_builder, object_index): # adm_builder, object_index):
fps = scene.render.fps # fps = scene.render.fps
frame_start = scene.frame_start # frame_start = scene.frame_start
frame_end = scene.frame_end # frame_end = scene.frame_end
#
block_formats = sound_object.adm_block_formats(room_size=room_size) # # block_formats = sound_object.adm_block_formats(room_size=room_size)
#
created = adm_builder.create_item_objects(track_index=object_index, # created = adm_builder.create_item_objects(track_index=object_index,
name=sound_object.object_name, # name=sound_object.object_name,
block_formats=block_formats) # block_formats=block_formats)
#
created.audio_object.start = Fraction(frame_start, fps) # created.audio_object.start = Fraction(frame_start, fps)
created.audio_object.duration = Fraction(frame_end - frame_start, fps) # created.audio_object.duration = Fraction(frame_end - frame_start, fps)
created.track_uid.sampleRate = sound_object.sample_rate # created.track_uid.sampleRate = sound_object.sample_rate
created.track_uid.bitDepth = sound_object.bits_per_sample # created.track_uid.bitDepth = sound_object.bits_per_sample
def adm_for_scene(scene: bpy.types.Scene, sound_object_mixes: List[ObjectMix], # def adm_for_scene(scene: bpy.types.Scene, sound_object_mixes: List[ObjectMix],
room_size): # room_size):
adm_builder = ADMBuilder() # adm_builder = ADMBuilder()
#
# frame_start = scene.frame_start
# frame_end = scene.frame_end
# fps = scene.render.fps
#
# adm_builder.create_programme(audioProgrammeName=scene.name,
# start=Fraction(frame_start, fps),
# end=Fraction(frame_end, fps))
#
# adm_builder.create_content(audioContentName="Objects")
#
# for object_index, sound_object in enumerate(sound_object_mixes):
# adm_for_object(scene, sound_object, room_size,
# adm_builder, object_index)
#
# adm = adm_builder.adm
#
# generate_ids(adm)
# chna = ChnaChunk()
# adm_chna.populate_chna_chunk(chna, adm)
#
# return adm_to_xml(adm), chna
#
frame_start = scene.frame_start # def bext_data(scene, sample_rate, room_size):
frame_end = scene.frame_end # description = "SCENE={};ROOM_SIZE={}\n".format(
fps = scene.render.fps # scene.name, room_size).encode("ascii")
# originator_name = "Blender {}".format(
adm_builder.create_programme(audioProgrammeName=scene.name, # bpy.app.version_string).encode("ascii")
start=Fraction(frame_start, fps), # originator_ref = uuid.uuid1().hex.encode("ascii")
end=Fraction(frame_end, fps)) # date10 = strftime("%Y-%m-%d").encode("ascii")
# time8 = strftime("%H:%M:%S").encode("ascii")
adm_builder.create_content(audioContentName="Objects") # timeref = int(float(scene.frame_start) *
# sample_rate / float(scene.render.fps))
for object_index, sound_object in enumerate(sound_object_mixes): # version = 0
adm_for_object(scene, sound_object, room_size, # umid = b"\0" * 64
adm_builder, object_index) # pad = b"\0" * 190
#
adm = adm_builder.adm # data = struct.pack("<256s32s32s10s8sQH64s190s", description,
# originator_name, originator_ref, date10, time8, timeref,
generate_ids(adm) # version, umid, pad)
chna = ChnaChunk() #
adm_chna.populate_chna_chunk(chna, adm) # return data
#
return adm_to_xml(adm), chna #
# def attach_outfile_metadata(out_format, outfile, room_size, scene,
# sound_objects):
# adm, chna = adm_for_scene(scene, sound_objects, room_size=room_size)
# outfile.axml = lxml.etree.tostring(adm, pretty_print=True)
# outfile.chna = chna
# outfile.bext = bext_data(scene, out_format.sampleRate, room_size=room_size)
#
#
# def write_outfile_audio_data(outfile, shortest_file, sound_objects):
# READ_BLOCK = 1024
# cursor = 0
#
# # Not sure if this is necessary but lets do it
# for obj in sound_objects:
# obj.mixdown_reader.seek(0)
#
# while True:
# remainder = shortest_file - cursor
# to_read = min(READ_BLOCK, remainder)
# if to_read == 0:
# break
#
# buffer = numpy.zeros((to_read, len(sound_objects)))
# for i, sound_object in enumerate(sound_objects):
# buffer[:, i] = sound_object.mixdown_reader.read(to_read)[:, 0]
#
# outfile.write(buffer)
# cursor = cursor + to_read
def bext_data(scene, sample_rate, room_size): # def write_muxed_wav(mix_pool: ObjectMixPool, scene, out_format, room_size,
description = "SCENE={};ROOM_SIZE={}\n".format( # outfile, shortest_file):
scene.name, room_size).encode("ascii") # sound_objects = mix_pool.object_mixes
originator_name = "Blender {}".format( # attach_outfile_metadata(out_format, outfile,
bpy.app.version_string).encode("ascii") # room_size, scene, sound_objects)
originator_ref = uuid.uuid1().hex.encode("ascii") # write_outfile_audio_data(outfile, shortest_file, sound_objects)
date10 = strftime("%Y-%m-%d").encode("ascii") #
time8 = strftime("%H:%M:%S").encode("ascii") #
timeref = int(float(scene.frame_start) * # def mux_adm_from_object_mix_pool(scene, mix_pool: ObjectMixPool,
sample_rate / float(scene.render.fps)) # output_filename, room_size=1.):
version = 0 # object_count = len(mix_pool.object_mixes)
umid = b"\0" * 64 # assert object_count > 0
pad = b"\0" * 190 #
# out_format = FormatInfoChunk(channelCount=object_count,
data = struct.pack("<256s32s32s10s8sQH64s190s", description, # sampleRate=scene.render.ffmpeg.audio_mixrate,
originator_name, originator_ref, date10, time8, timeref, # bitsPerSample=24)
version, umid, pad) #
# with openBw64(output_filename, 'w', formatInfo=out_format) as outfile:
return data # write_muxed_wav(mix_pool, scene, out_format, room_size,
# outfile, mix_pool.shortest_file_length)
#
def attach_outfile_metadata(out_format, outfile, room_size, scene,
sound_objects):
adm, chna = adm_for_scene(scene, sound_objects, room_size=room_size)
outfile.axml = lxml.etree.tostring(adm, pretty_print=True)
outfile.chna = chna
outfile.bext = bext_data(scene, out_format.sampleRate, room_size=room_size)
def write_outfile_audio_data(outfile, shortest_file, sound_objects):
READ_BLOCK = 1024
cursor = 0
# Not sure if this is necessary but lets do it
for obj in sound_objects:
obj.mixdown_reader.seek(0)
while True:
remainder = shortest_file - cursor
to_read = min(READ_BLOCK, remainder)
if to_read == 0:
break
buffer = numpy.zeros((to_read, len(sound_objects)))
for i, sound_object in enumerate(sound_objects):
buffer[:, i] = sound_object.mixdown_reader.read(to_read)[:, 0]
outfile.write(buffer)
cursor = cursor + to_read
def write_muxed_wav(mix_pool: ObjectMixPool, scene, out_format, room_size,
outfile, shortest_file):
sound_objects = mix_pool.object_mixes
attach_outfile_metadata(out_format, outfile,
room_size, scene, sound_objects)
write_outfile_audio_data(outfile, shortest_file, sound_objects)
def mux_adm_from_object_mix_pool(scene, mix_pool: ObjectMixPool,
output_filename, room_size=1.):
object_count = len(mix_pool.object_mixes)
assert object_count > 0
out_format = FormatInfoChunk(channelCount=object_count,
sampleRate=scene.render.ffmpeg.audio_mixrate,
bitsPerSample=24)
with openBw64(output_filename, 'w', formatInfo=out_format) as outfile:
write_muxed_wav(mix_pool, scene, out_format, room_size,
outfile, mix_pool.shortest_file_length)
def print_partition_results(object_groups, sound_sources, too_far_speakers): def print_partition_results(object_groups, sound_sources, too_far_speakers):
print("Will create {} objects for {} sources, ignoring {} sources".format( print("Will create {} objects for {} sources, ignoring {} sources".format(
@@ -225,7 +208,8 @@ def partition_sounds_to_objects(scene, max_objects) -> \
return object_groups, too_far_speakers return object_groups, too_far_speakers
def generate_adm(context: bpy.types.Context, filepath: str, room_size: float, def generate_adm(context: bpy.types.Context, filepath: str,
room_size: float,
max_objects: int) -> set[str]: max_objects: int) -> set[str]:
scene = context.scene scene = context.scene
@@ -239,10 +223,8 @@ def generate_adm(context: bpy.types.Context, filepath: str, room_size: float,
base_dir=dirname(filepath)) base_dir=dirname(filepath))
with ObjectMixPool(object_mixes=mix_groups) as pool: with ObjectMixPool(object_mixes=mix_groups) as pool:
mux_adm_from_object_mix_pool(scene, mix_pool=pool, # here is where we place the mix objects into the session
output_filename=filepath, pass
room_size=room_size)
print("Finished muxing ADM")
print("generate_adm exiting") # print("generate_adm exiting")
return {'FINISHED'} return {'FINISHED'}

View File

@@ -9,7 +9,7 @@ from .speaker_utils import solo_speakers, unmute_all_speakers
@contextmanager @contextmanager
def adm_object_rendering_context(scene: bpy.types.Scene): def object_rendering_context(scene: bpy.types.Scene):
old_ff = scene.render.image_settings.file_format old_ff = scene.render.image_settings.file_format
old_codec = scene.render.ffmpeg.audio_codec old_codec = scene.render.ffmpeg.audio_codec
old_chans = scene.render.ffmpeg.audio_channels old_chans = scene.render.ffmpeg.audio_channels
@@ -34,8 +34,8 @@ class ObjectMix:
self.intermediate_filename = None self.intermediate_filename = None
self.base_dir = base_dir self.base_dir = base_dir
self.scene = scene self.scene = scene
self._mixdown_file_handle = None # self._mixdown_file_handle = None
self._mixdown_reader = None # self._mixdown_reader = None
@property @property
def frame_start(self): def frame_start(self):
@@ -73,7 +73,7 @@ class ObjectMix:
return self.sources[0].name return self.sources[0].name
def mixdown(self): def mixdown(self):
with adm_object_rendering_context(self.scene) as scene: with object_rendering_context(self.scene) as scene:
solo_speakers(scene, self.sources) solo_speakers(scene, self.sources)
scene_name = bpy.path.clean_name(scene.name) scene_name = bpy.path.clean_name(scene.name)
@@ -83,7 +83,7 @@ class ObjectMix:
self.base_dir, "%s_%s.wav" % (scene_name, speaker_name)) self.base_dir, "%s_%s.wav" % (scene_name, speaker_name))
bpy.ops.sound.mixdown(filepath=self.intermediate_filename, bpy.ops.sound.mixdown(filepath=self.intermediate_filename,
container='WAV', codec='PCM', format='S24') container='WAV', codec='PCM', format='F32')
print("Created mixdown named {}" print("Created mixdown named {}"
.format(self.intermediate_filename)) .format(self.intermediate_filename))
@@ -121,8 +121,10 @@ class ObjectMixPool:
return min(lengths) return min(lengths)
def object_mixes_from_source_groups(groups: List[List[bpy.types.Object]], def object_mixes_from_source_groups(
scene: bpy.types.Scene, base_dir: str): groups: List[List[bpy.types.Object]],
scene: bpy.types.Scene, base_dir: str) -> list[ObjectMix]:
mixes = [] mixes = []
for group in groups: for group in groups:
mixes.append(ObjectMix(sources=group, scene=scene, base_dir=base_dir)) mixes.append(ObjectMix(sources=group, scene=scene, base_dir=base_dir))

View File

@@ -1,86 +0,0 @@
## This is copied from
## https://blender.stackexchange.com/questions/4956/convert-particle-system-to-animated-meshes?answertab=active#tab-top
#
# And needs to be adapted
import bpy
# Module-level switches controlling which properties keyframe_obj() keys
# on each duplicated object.
# Set these to False if you don't want to key that property.
KEYFRAME_LOCATION = True
KEYFRAME_ROTATION = True
KEYFRAME_SCALE = True
KEYFRAME_VISIBILITY = True  # Viewport and render visibility.
def create_objects_for_particles(ps, obj):
    """Create one linked duplicate of *obj* per particle in *ps*.

    The duplicates share *obj*'s mesh datablock (instances, not full
    copies) and are linked into a new "particles" collection attached to
    the current scene. Returns the list of duplicate objects, in particle
    order.
    """
    duplicates = []
    # Group all duplicates in their own collection so they are easy to
    # find and manage afterwards.
    collection = bpy.data.collections.new(name="particles")
    bpy.context.scene.collection.children.link(collection)
    shared_mesh = obj.data
    for index, _particle in enumerate(ps.particles):
        instance = bpy.data.objects.new(
            name="particle.{:03d}".format(index),
            object_data=shared_mesh)
        collection.objects.link(instance)
        duplicates.append(instance)
    return duplicates
def match_and_keyframe_objects(ps, obj_list, start_frame, end_frame):
    """Align each object with its particle and keyframe it, for every frame
    in the inclusive range [start_frame, end_frame]."""
    for frame in range(start_frame, end_frame + 1):
        print("frame {} processed".format(frame))
        # Move the scene to this frame so particle states are current.
        bpy.context.scene.frame_set(frame)
        for particle, target in zip(ps.particles, obj_list):
            match_object_to_particle(particle, target)
            keyframe_obj(target)
def match_object_to_particle(p, obj):
    """Copy the particle's transform and visibility onto the object.

    Sets *obj*'s location, rotation (forcing quaternion rotation mode so
    the particle's quaternion applies directly), uniform scale from the
    particle size, and hides the object in both viewport and render when
    the particle's ``alive_state`` is not ``'ALIVE'``.
    """
    obj.location = p.location
    # Particle rotation is a quaternion; set the matching rotation mode so
    # the assignment below is interpreted correctly.
    obj.rotation_mode = 'QUATERNION'
    obj.rotation_quaternion = p.rotation
    obj.scale = (p.size, p.size, p.size)
    # Only particles currently in the 'ALIVE' state are visible.
    visible = p.alive_state == 'ALIVE'
    obj.hide_viewport = not visible  # <<<-- this was called "hide" in <= 2.79
    obj.hide_render = not visible
def keyframe_obj(obj):
    """Insert keyframes on *obj* for every channel whose module-level
    KEYFRAME_* flag is enabled."""
    # (flag, data paths) pairs; visibility keys two paths at once.
    channels = (
        (KEYFRAME_LOCATION, ("location",)),
        (KEYFRAME_ROTATION, ("rotation_quaternion",)),
        (KEYFRAME_SCALE, ("scale",)),
        # "hide_viewport" was called "hide" in Blender <= 2.79.
        (KEYFRAME_VISIBILITY, ("hide_viewport", "hide_render")),
    )
    for enabled, data_paths in channels:
        if enabled:
            for data_path in data_paths:
                obj.keyframe_insert(data_path)
def main():
    """Convert the active object's particle system into keyframed mesh
    instances of the other selected object."""
    # In 2.8+ the dependency graph must be evaluated in order to read data
    # coming from animation, modifiers, etc.
    depsgraph = bpy.context.evaluated_depsgraph_get()

    # Assume exactly two objects are selected: the active one carries the
    # particle system, the other is the template to duplicate.
    emitter = bpy.context.object
    emitter_evaluated = depsgraph.objects[emitter.name]
    template = [o for o in bpy.context.selected_objects if o != emitter][0]

    # Assume only one particle system is present on the emitter.
    particle_system = emitter_evaluated.particle_systems[0]

    scene = bpy.context.scene
    duplicates = create_objects_for_particles(particle_system, template)
    match_and_keyframe_objects(particle_system, duplicates,
                               scene.frame_start, scene.frame_end)


if __name__ == '__main__':
    main()