Removing dead code for ADM

This commit is contained in:
2025-11-05 21:38:55 -08:00
parent b83476f820
commit 7b2c3fdc1f
3 changed files with 129 additions and 231 deletions

View File

@@ -1,32 +1,14 @@
import bpy
import lxml
import uuid
from fractions import Fraction
import struct
from os.path import dirname
import numpy
from time import strftime
from typing import List
from ear.fileio.utils import openBw64
from ear.fileio.bw64.chunks import (FormatInfoChunk, ChnaChunk)
from ear.fileio.adm import chna as adm_chna
from ear.fileio.adm.xml import adm_to_xml
from ear.fileio.adm.builder import (ADMBuilder)
from ear.fileio.adm.generate_ids import generate_ids
from .geom_utils import (speaker_active_time_range,
speakers_by_min_distance,
speakers_by_start_time)
from .object_mix import (ObjectMix, ObjectMixPool,
object_mixes_from_source_groups)
from .object_mix import (ObjectMixPool, object_mixes_from_source_groups)
from .speaker_utils import (all_speakers)
@@ -57,7 +39,8 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
ret_val = [[]]
for spk in by_priority:
success = False # flaggy-flag because I can't do a break->continue from the inner
success = False # flaggy-flag because I can't do a break->continue
# from the inner
for elem in ret_val:
if list_can_accept_speaker(elem, spk):
elem.append(spk)
@@ -72,123 +55,123 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
return ret_val
def adm_for_object(scene: bpy.types.Scene, sound_object: ObjectMix, room_size,
                   adm_builder, object_index):
    """Add one ADM audioObject (plus track UID) for *sound_object* to *adm_builder*.

    :param scene: Blender scene supplying the frame range and fps.
    :param sound_object: mix whose block formats and audio properties are used.
    :param room_size: scale passed through to ``adm_block_formats``.
    :param adm_builder: an EAR ``ADMBuilder`` the item is created on.
    :param object_index: zero-based track index for this object in the output file.
    """
    fps = scene.render.fps
    frame_start = scene.frame_start
    frame_end = scene.frame_end

    # Block formats describing this object's rendering over time,
    # scaled by room_size.
    block_formats = sound_object.adm_block_formats(room_size=room_size)

    created = adm_builder.create_item_objects(track_index=object_index,
                                              name=sound_object.object_name,
                                              block_formats=block_formats)

    # Start/duration are exact rational seconds derived from the scene
    # frame range, avoiding float rounding in the ADM timing.
    created.audio_object.start = Fraction(frame_start, fps)
    created.audio_object.duration = Fraction(frame_end - frame_start, fps)
    created.track_uid.sampleRate = sound_object.sample_rate
    created.track_uid.bitDepth = sound_object.bits_per_sample
def adm_for_scene(scene: bpy.types.Scene, sound_object_mixes: List[ObjectMix],
                  room_size):
    """Build the complete ADM model for *scene*.

    Creates one audioProgramme spanning the scene's frame range, one
    "Objects" audioContent, and one audioObject per entry in
    *sound_object_mixes*.

    :returns: tuple of (ADM XML element tree, populated ``ChnaChunk``).
    """
    adm_builder = ADMBuilder()

    frame_start = scene.frame_start
    frame_end = scene.frame_end
    fps = scene.render.fps

    # Programme timing as exact rational seconds of the scene frame range.
    adm_builder.create_programme(audioProgrammeName=scene.name,
                                 start=Fraction(frame_start, fps),
                                 end=Fraction(frame_end, fps))

    adm_builder.create_content(audioContentName="Objects")

    for object_index, sound_object in enumerate(sound_object_mixes):
        adm_for_object(scene, sound_object, room_size,
                       adm_builder, object_index)

    adm = adm_builder.adm

    # IDs must be generated before the chna chunk can reference them.
    generate_ids(adm)
    chna = ChnaChunk()
    adm_chna.populate_chna_chunk(chna, adm)

    return adm_to_xml(adm), chna
def bext_data(scene, sample_rate, room_size):
    """Pack a BWF ``bext`` chunk payload for the rendered scene.

    Encodes scene/room provenance in the description, Blender's version as
    originator, a fresh UUID as the originator reference, the current
    date/time, and a TimeReference equal to the scene start expressed in
    samples.

    :param scene: Blender scene (``name``, ``frame_start``, ``render.fps``).
    :param sample_rate: output sample rate used to compute TimeReference.
    :param room_size: recorded in the description string for round-tripping.
    :returns: ``bytes`` laid out per the bext chunk structure.
    """
    description = "SCENE={};ROOM_SIZE={}\n".format(
        scene.name, room_size).encode("ascii")
    originator_name = "Blender {}".format(
        bpy.app.version_string).encode("ascii")
    originator_ref = uuid.uuid1().hex.encode("ascii")
    date10 = strftime("%Y-%m-%d").encode("ascii")
    time8 = strftime("%H:%M:%S").encode("ascii")

    # TimeReference: first sample of the scene start, in samples since
    # midnight per the bext convention — here simply frame_start converted
    # to samples.
    timeref = int(float(scene.frame_start) *
                  sample_rate / float(scene.render.fps))
    version = 0
    umid = b"\0" * 64
    pad = b"\0" * 190  # reserved bytes, zero-filled

    # Fixed-width little-endian layout: 256s description, 32s originator,
    # 32s originator ref, 10s date, 8s time, Q timeref, H version,
    # 64s UMID, 190s reserved.
    data = struct.pack("<256s32s32s10s8sQH64s190s", description,
                       originator_name, originator_ref, date10, time8, timeref,
                       version, umid, pad)

    return data
def attach_outfile_metadata(out_format, outfile, room_size, scene,
                            sound_objects):
    """Attach bext, chna and axml metadata chunks to the output BW64 file."""
    # bext records provenance plus the sample-accurate scene start time.
    outfile.bext = bext_data(scene, out_format.sampleRate, room_size=room_size)

    adm_element, chna_chunk = adm_for_scene(scene, sound_objects,
                                            room_size=room_size)
    outfile.chna = chna_chunk
    outfile.axml = lxml.etree.tostring(adm_element, pretty_print=True)
def write_outfile_audio_data(outfile, shortest_file, sound_objects):
    """Interleave each object's mono mixdown into *outfile*.

    Streams in fixed-size chunks, taking channel *i* from
    ``sound_objects[i].mixdown_reader``, and stops after *shortest_file*
    frames so every channel contributes the same length.
    """
    chunk_frames = 1024

    # Rewind every reader so all channels start from frame zero.
    # (May be redundant, but it is cheap insurance.)
    for mix in sound_objects:
        mix.mixdown_reader.seek(0)

    cursor = 0
    while cursor < shortest_file:
        to_read = min(chunk_frames, shortest_file - cursor)

        interleaved = numpy.zeros((to_read, len(sound_objects)))
        for channel, mix in enumerate(sound_objects):
            # Each mixdown is read as (frames, 1); keep column 0.
            interleaved[:, channel] = mix.mixdown_reader.read(to_read)[:, 0]

        outfile.write(interleaved)
        cursor += to_read
def write_muxed_wav(mix_pool: ObjectMixPool, scene, out_format, room_size,
                    outfile, shortest_file):
    """Write metadata chunks, then the interleaved audio, for *mix_pool*."""
    mixes = mix_pool.object_mixes
    attach_outfile_metadata(out_format, outfile, room_size, scene, mixes)
    write_outfile_audio_data(outfile, shortest_file, mixes)
def mux_adm_from_object_mix_pool(scene, mix_pool: ObjectMixPool,
                                 output_filename, room_size=1.):
    """Mux every object mix in *mix_pool* into a single ADM BW64 file.

    Opens *output_filename* as a 24-bit BW64 with one channel per object
    mix at the scene's ffmpeg audio mix rate, then writes metadata and
    interleaved audio via :func:`write_muxed_wav`.

    :param scene: Blender scene supplying the output sample rate.
    :param mix_pool: pool of rendered object mixes; must be non-empty.
    :param output_filename: path of the BW64/WAV file to create.
    :param room_size: room scale forwarded into the ADM metadata.
    """
    object_count = len(mix_pool.object_mixes)
    assert object_count > 0

    out_format = FormatInfoChunk(channelCount=object_count,
                                 sampleRate=scene.render.ffmpeg.audio_mixrate,
                                 bitsPerSample=24)

    # openBw64 handles the RIFF/BW64 framing; truncate audio to the
    # shortest mixdown so all channels stay aligned.
    with openBw64(output_filename, 'w', formatInfo=out_format) as outfile:
        write_muxed_wav(mix_pool, scene, out_format, room_size,
                        outfile, mix_pool.shortest_file_length)
def print_partition_results(object_groups, sound_sources, too_far_speakers):
print("Will create {} objects for {} sources, ignoring {} sources".format(
@@ -225,7 +208,8 @@ def partition_sounds_to_objects(scene, max_objects) -> \
return object_groups, too_far_speakers
def generate_adm(context: bpy.types.Context, filepath: str, room_size: float,
def generate_adm(context: bpy.types.Context, filepath: str,
room_size: float,
max_objects: int) -> set[str]:
scene = context.scene
@@ -239,10 +223,8 @@ def generate_adm(context: bpy.types.Context, filepath: str, room_size: float,
base_dir=dirname(filepath))
with ObjectMixPool(object_mixes=mix_groups) as pool:
mux_adm_from_object_mix_pool(scene, mix_pool=pool,
output_filename=filepath,
room_size=room_size)
print("Finished muxing ADM")
# here is where we place the mix objects into the session
pass
print("generate_adm exiting")
# print("generate_adm exiting")
return {'FINISHED'}

View File

@@ -9,7 +9,7 @@ from .speaker_utils import solo_speakers, unmute_all_speakers
@contextmanager
def adm_object_rendering_context(scene: bpy.types.Scene):
def object_rendering_context(scene: bpy.types.Scene):
old_ff = scene.render.image_settings.file_format
old_codec = scene.render.ffmpeg.audio_codec
old_chans = scene.render.ffmpeg.audio_channels
@@ -34,8 +34,8 @@ class ObjectMix:
self.intermediate_filename = None
self.base_dir = base_dir
self.scene = scene
self._mixdown_file_handle = None
self._mixdown_reader = None
# self._mixdown_file_handle = None
# self._mixdown_reader = None
@property
def frame_start(self):
@@ -73,7 +73,7 @@ class ObjectMix:
return self.sources[0].name
def mixdown(self):
with adm_object_rendering_context(self.scene) as scene:
with object_rendering_context(self.scene) as scene:
solo_speakers(scene, self.sources)
scene_name = bpy.path.clean_name(scene.name)
@@ -83,7 +83,7 @@ class ObjectMix:
self.base_dir, "%s_%s.wav" % (scene_name, speaker_name))
bpy.ops.sound.mixdown(filepath=self.intermediate_filename,
container='WAV', codec='PCM', format='S24')
container='WAV', codec='PCM', format='F32')
print("Created mixdown named {}"
.format(self.intermediate_filename))
@@ -121,8 +121,10 @@ class ObjectMixPool:
return min(lengths)
def object_mixes_from_source_groups(groups: List[List[bpy.types.Object]],
scene: bpy.types.Scene, base_dir: str):
def object_mixes_from_source_groups(
groups: List[List[bpy.types.Object]],
scene: bpy.types.Scene, base_dir: str) -> list[ObjectMix]:
mixes = []
for group in groups:
mixes.append(ObjectMix(sources=group, scene=scene, base_dir=base_dir))