Compare commits

...

5 Commits

SHA1 Message Date
60dd1de71d Updating requirements, removing ear 2025-11-05 21:41:27 -08:00
7b2c3fdc1f Removing dead code for ADM 2025-11-05 21:38:55 -08:00
b83476f820 Removing ear dependency 2025-11-05 21:18:39 -08:00
8e9a49b587 Removing ear dependency 2025-11-05 21:17:28 -08:00
dc064cba29 Cleaning some things out 2025-11-05 21:12:55 -08:00
5 changed files with 153 additions and 300 deletions

View File

@@ -1,32 +1,14 @@
import bpy
import lxml
import uuid
from fractions import Fraction
import struct
from os.path import dirname
import numpy
from time import strftime
from typing import List
from ear.fileio.utils import openBw64
from ear.fileio.bw64.chunks import (FormatInfoChunk, ChnaChunk)
from ear.fileio.adm import chna as adm_chna
from ear.fileio.adm.xml import adm_to_xml
from ear.fileio.adm.builder import (ADMBuilder)
from ear.fileio.adm.generate_ids import generate_ids
from .geom_utils import (speaker_active_time_range,
speakers_by_min_distance,
speakers_by_start_time)
from .object_mix import (ObjectMix, ObjectMixPool,
object_mixes_from_source_groups)
from .object_mix import (ObjectMixPool, object_mixes_from_source_groups)
from .speaker_utils import (all_speakers)
@@ -57,7 +39,8 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
    ret_val = [[]]
    for spk in by_priority:
        success = False  # flaggy-flag because I can't do a break->continue from the inner
        success = False  # flaggy-flag because I can't do a break->continue
        # from the inner
        for elem in ret_val:
            if list_can_accept_speaker(elem, spk):
                elem.append(spk)
@@ -72,123 +55,123 @@ def group_speakers(speakers, scene) -> List[List[bpy.types.Object]]:
    return ret_val
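
For reference, group_speakers follows a first-fit grouping pattern. A minimal standalone sketch of the same idea, with a toy predicate standing in for this module's list_can_accept_speaker (the size cap below is hypothetical):

def first_fit(items, can_accept):
    groups = [[]]
    for item in items:
        placed = False
        for group in groups:
            if can_accept(group, item):
                group.append(item)
                placed = True
                break
        if not placed:
            groups.append([item])
    return groups

# Toy predicate: at most 4 items per group.
print(first_fit(list(range(10)), lambda g, i: len(g) < 4))
# -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
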
def adm_for_object(scene: bpy.types.Scene, sound_object: ObjectMix, room_size,
adm_builder, object_index):
    fps = scene.render.fps
    frame_start = scene.frame_start
    frame_end = scene.frame_end
    block_formats = sound_object.adm_block_formats(room_size=room_size)
    created = adm_builder.create_item_objects(track_index=object_index,
                                              name=sound_object.object_name,
                                              block_formats=block_formats)
    created.audio_object.start = Fraction(frame_start, fps)
    created.audio_object.duration = Fraction(frame_end - frame_start, fps)
    created.track_uid.sampleRate = sound_object.sample_rate
    created.track_uid.bitDepth = sound_object.bits_per_sample
# def adm_for_object(scene: bpy.types.Scene, sound_object: ObjectMix, room_size,
# adm_builder, object_index):
# fps = scene.render.fps
# frame_start = scene.frame_start
# frame_end = scene.frame_end
#
# # block_formats = sound_object.adm_block_formats(room_size=room_size)
#
# created = adm_builder.create_item_objects(track_index=object_index,
# name=sound_object.object_name,
# block_formats=block_formats)
#
# created.audio_object.start = Fraction(frame_start, fps)
# created.audio_object.duration = Fraction(frame_end - frame_start, fps)
# created.track_uid.sampleRate = sound_object.sample_rate
# created.track_uid.bitDepth = sound_object.bits_per_sample
def adm_for_scene(scene: bpy.types.Scene, sound_object_mixes: List[ObjectMix],
room_size):
    adm_builder = ADMBuilder()
# def adm_for_scene(scene: bpy.types.Scene, sound_object_mixes: List[ObjectMix],
# room_size):
# adm_builder = ADMBuilder()
#
# frame_start = scene.frame_start
# frame_end = scene.frame_end
# fps = scene.render.fps
#
# adm_builder.create_programme(audioProgrammeName=scene.name,
# start=Fraction(frame_start, fps),
# end=Fraction(frame_end, fps))
#
# adm_builder.create_content(audioContentName="Objects")
#
# for object_index, sound_object in enumerate(sound_object_mixes):
# adm_for_object(scene, sound_object, room_size,
# adm_builder, object_index)
#
# adm = adm_builder.adm
#
# generate_ids(adm)
# chna = ChnaChunk()
# adm_chna.populate_chna_chunk(chna, adm)
#
# return adm_to_xml(adm), chna
#
    frame_start = scene.frame_start
    frame_end = scene.frame_end
    fps = scene.render.fps
    adm_builder.create_programme(audioProgrammeName=scene.name,
                                 start=Fraction(frame_start, fps),
                                 end=Fraction(frame_end, fps))
    adm_builder.create_content(audioContentName="Objects")
    for object_index, sound_object in enumerate(sound_object_mixes):
        adm_for_object(scene, sound_object, room_size,
                       adm_builder, object_index)
    adm = adm_builder.adm
    generate_ids(adm)
    chna = ChnaChunk()
    adm_chna.populate_chna_chunk(chna, adm)
    return adm_to_xml(adm), chna
# def bext_data(scene, sample_rate, room_size):
# description = "SCENE={};ROOM_SIZE={}\n".format(
# scene.name, room_size).encode("ascii")
# originator_name = "Blender {}".format(
# bpy.app.version_string).encode("ascii")
# originator_ref = uuid.uuid1().hex.encode("ascii")
# date10 = strftime("%Y-%m-%d").encode("ascii")
# time8 = strftime("%H:%M:%S").encode("ascii")
# timeref = int(float(scene.frame_start) *
# sample_rate / float(scene.render.fps))
# version = 0
# umid = b"\0" * 64
# pad = b"\0" * 190
#
# data = struct.pack("<256s32s32s10s8sQH64s190s", description,
# originator_name, originator_ref, date10, time8, timeref,
# version, umid, pad)
#
# return data
#
#
# def attach_outfile_metadata(out_format, outfile, room_size, scene,
# sound_objects):
# adm, chna = adm_for_scene(scene, sound_objects, room_size=room_size)
# outfile.axml = lxml.etree.tostring(adm, pretty_print=True)
# outfile.chna = chna
# outfile.bext = bext_data(scene, out_format.sampleRate, room_size=room_size)
#
#
# def write_outfile_audio_data(outfile, shortest_file, sound_objects):
# READ_BLOCK = 1024
# cursor = 0
#
# # Not sure if this is necessary, but let's do it
# for obj in sound_objects:
# obj.mixdown_reader.seek(0)
#
# while True:
# remainder = shortest_file - cursor
# to_read = min(READ_BLOCK, remainder)
# if to_read == 0:
# break
#
# buffer = numpy.zeros((to_read, len(sound_objects)))
# for i, sound_object in enumerate(sound_objects):
# buffer[:, i] = sound_object.mixdown_reader.read(to_read)[:, 0]
#
# outfile.write(buffer)
# cursor = cursor + to_read
def bext_data(scene, sample_rate, room_size):
description = "SCENE={};ROOM_SIZE={}\n".format(
scene.name, room_size).encode("ascii")
originator_name = "Blender {}".format(
bpy.app.version_string).encode("ascii")
originator_ref = uuid.uuid1().hex.encode("ascii")
date10 = strftime("%Y-%m-%d").encode("ascii")
time8 = strftime("%H:%M:%S").encode("ascii")
timeref = int(float(scene.frame_start) *
sample_rate / float(scene.render.fps))
version = 0
umid = b"\0" * 64
pad = b"\0" * 190
data = struct.pack("<256s32s32s10s8sQH64s190s", description,
originator_name, originator_ref, date10, time8, timeref,
version, umid, pad)
return data
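
A quick sanity check on the struct layout above, since the format string is easy to mistype: with the "<" prefix there is no alignment padding, so the packed bext payload is exactly 256+32+32+10+8+8+2+64+190 = 602 bytes, the size of an EBU Tech 3285 bext chunk that carries a 64-byte UMID. One tentative caveat: the UMID field was introduced in BWF version 1, so writing version = 0 alongside a UMID is arguably inconsistent and worth checking against the spec.

import struct

BEXT_FMT = "<256s32s32s10s8sQH64s190s"
# description(256s) originator(32s) originator_ref(32s) date(10s) time(8s)
# time_reference(Q, 8 bytes) version(H, 2 bytes) umid(64s) reserved(190s)
assert struct.calcsize(BEXT_FMT) == 602
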
def attach_outfile_metadata(out_format, outfile, room_size, scene,
sound_objects):
    adm, chna = adm_for_scene(scene, sound_objects, room_size=room_size)
    outfile.axml = lxml.etree.tostring(adm, pretty_print=True)
    outfile.chna = chna
    outfile.bext = bext_data(scene, out_format.sampleRate, room_size=room_size)
def write_outfile_audio_data(outfile, shortest_file, sound_objects):
    READ_BLOCK = 1024
    cursor = 0
    # Not sure if this is necessary, but let's do it
    for obj in sound_objects:
        obj.mixdown_reader.seek(0)
    while True:
        remainder = shortest_file - cursor
        to_read = min(READ_BLOCK, remainder)
        if to_read == 0:
            break
        buffer = numpy.zeros((to_read, len(sound_objects)))
        for i, sound_object in enumerate(sound_objects):
            buffer[:, i] = sound_object.mixdown_reader.read(to_read)[:, 0]
        outfile.write(buffer)
        cursor = cursor + to_read
def write_muxed_wav(mix_pool: ObjectMixPool, scene, out_format, room_size,
outfile, shortest_file):
    sound_objects = mix_pool.object_mixes
    attach_outfile_metadata(out_format, outfile,
                            room_size, scene, sound_objects)
    write_outfile_audio_data(outfile, shortest_file, sound_objects)
def mux_adm_from_object_mix_pool(scene, mix_pool: ObjectMixPool,
output_filename, room_size=1.):
    object_count = len(mix_pool.object_mixes)
    assert object_count > 0
    out_format = FormatInfoChunk(channelCount=object_count,
                                 sampleRate=scene.render.ffmpeg.audio_mixrate,
                                 bitsPerSample=24)
    with openBw64(output_filename, 'w', formatInfo=out_format) as outfile:
        write_muxed_wav(mix_pool, scene, out_format, room_size,
                        outfile, mix_pool.shortest_file_length)
# def write_muxed_wav(mix_pool: ObjectMixPool, scene, out_format, room_size,
# outfile, shortest_file):
# sound_objects = mix_pool.object_mixes
# attach_outfile_metadata(out_format, outfile,
# room_size, scene, sound_objects)
# write_outfile_audio_data(outfile, shortest_file, sound_objects)
#
#
# def mux_adm_from_object_mix_pool(scene, mix_pool: ObjectMixPool,
# output_filename, room_size=1.):
# object_count = len(mix_pool.object_mixes)
# assert object_count > 0
#
# out_format = FormatInfoChunk(channelCount=object_count,
# sampleRate=scene.render.ffmpeg.audio_mixrate,
# bitsPerSample=24)
#
# with openBw64(output_filename, 'w', formatInfo=out_format) as outfile:
# write_muxed_wav(mix_pool, scene, out_format, room_size,
# outfile, mix_pool.shortest_file_length)
#
def print_partition_results(object_groups, sound_sources, too_far_speakers):
print("Will create {} objects for {} sources, ignoring {} sources".format(
@@ -225,7 +208,8 @@ def partition_sounds_to_objects(scene, max_objects) -> \
    return object_groups, too_far_speakers
def generate_adm(context: bpy.types.Context, filepath: str, room_size: float,
def generate_adm(context: bpy.types.Context, filepath: str,
room_size: float,
max_objects: int) -> set[str]:
    scene = context.scene
@@ -239,10 +223,8 @@ def generate_adm(context: bpy.types.Context, filepath: str, room_size: float,
base_dir=dirname(filepath))
    with ObjectMixPool(object_mixes=mix_groups) as pool:
        mux_adm_from_object_mix_pool(scene, mix_pool=pool,
                                     output_filename=filepath,
                                     room_size=room_size)
        print("Finished muxing ADM")
        # here is where we place the mix objects into the session
        pass
    print("generate_adm exiting")
    # print("generate_adm exiting")
    return {'FINISHED'}
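
For context, {'FINISHED'} is the status set a Blender operator's execute() is expected to return, so generate_adm is presumably called from an operator. A minimal sketch of that wiring; the class name, bl_idname, and property defaults below are hypothetical, not taken from this repo:

import bpy

class EXPORT_SCENE_OT_adm(bpy.types.Operator):  # hypothetical operator
    bl_idname = "export_scene.adm"              # hypothetical idname
    bl_label = "Export ADM WAV"

    filepath: bpy.props.StringProperty(subtype='FILE_PATH')
    room_size: bpy.props.FloatProperty(default=1.0)
    max_objects: bpy.props.IntProperty(default=16)

    def execute(self, context):
        # generate_adm already returns an operator status set
        return generate_adm(context, self.filepath,
                            room_size=self.room_size,
                            max_objects=self.max_objects)
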

View File

@@ -1,20 +1,15 @@
import os
import bpy
from contextlib import contextmanager
from fractions import Fraction
from typing import List
from ear.fileio.adm.elements import (ObjectCartesianPosition, JumpPosition,
AudioBlockFormatObjects)
from ear.fileio.bw64 import Bw64Reader
import wave
from .geom_utils import (speaker_active_time_range, compute_relative_vector,
room_norm_vector)
from .speaker_utils import solo_speakers, unmute_all_speakers
@contextmanager
def adm_object_rendering_context(scene: bpy.types.Scene):
def object_rendering_context(scene: bpy.types.Scene):
    old_ff = scene.render.image_settings.file_format
    old_codec = scene.render.ffmpeg.audio_codec
    old_chans = scene.render.ffmpeg.audio_channels
@@ -39,8 +34,8 @@ class ObjectMix:
        self.intermediate_filename = None
        self.base_dir = base_dir
        self.scene = scene
        self._mixdown_file_handle = None
        self._mixdown_reader = None
        # self._mixdown_file_handle = None
        # self._mixdown_reader = None
    @property
    def frame_start(self):
@@ -50,34 +45,27 @@ class ObjectMix:
    def frame_end(self):
        return self.scene.frame_end
    @property
    def sample_rate(self):
        return self.mixdown_reader.sampleRate
    @property
    def sample_rate(self) -> int:
        with wave.open(self.mixdown_filename, "rb") as f:
            return f.getframerate()
    @property
    def bits_per_sample(self):
        return self.mixdown_reader.bitdepth
    def bits_per_sample(self) -> int:
        with wave.open(self.mixdown_filename, "rb") as f:
            return f.getsampwidth() * 8
    @property
    def frames_length(self) -> int:
        with wave.open(self.mixdown_filename, "rb") as f:
            return f.getnframes()
    @property
    def mixdown_reader(self) -> Bw64Reader:
        if self._mixdown_reader is None:
            self._mixdown_reader = Bw64Reader(self.mixdown_file_handle)
        return self._mixdown_reader
    @property
    def mixdown_file_handle(self):
        assert self.mixdown_filename
        if self._mixdown_file_handle is None:
            self._mixdown_file_handle = open(self.mixdown_filename, 'rb')
        return self._mixdown_file_handle
    @property
    def mixdown_filename(self):
    def mixdown_filename(self) -> str:
        if self.intermediate_filename is None:
            self.mixdown()
        assert self.intermediate_filename
        return self.intermediate_filename
    @property
@@ -85,7 +73,7 @@ class ObjectMix:
        return self.sources[0].name
    def mixdown(self):
        with adm_object_rendering_context(self.scene) as scene:
        with object_rendering_context(self.scene) as scene:
            solo_speakers(scene, self.sources)
            scene_name = bpy.path.clean_name(scene.name)
@@ -95,50 +83,13 @@ class ObjectMix:
                self.base_dir, "%s_%s.wav" % (scene_name, speaker_name))
            bpy.ops.sound.mixdown(filepath=self.intermediate_filename,
                                  container='WAV', codec='PCM', format='S24')
                                  container='WAV', codec='PCM', format='F32')
            print("Created mixdown named {}"
                  .format(self.intermediate_filename))
            unmute_all_speakers(scene)
    def adm_block_formats(self, room_size=1.):
        fps = self.scene.render.fps
        block_formats = []
        for speaker_obj in self.sources:
            speaker_interval = speaker_active_time_range(speaker_obj)
            for frame in range(speaker_interval.start_frame,
                               speaker_interval.end_frame + 1):
                assert self.scene.camera
                self.scene.frame_set(frame)
                relative_vector = compute_relative_vector(
                    camera=self.scene.camera,
                    target=speaker_obj)
                norm_vec = room_norm_vector(
                    relative_vector, room_size=room_size)
                pos = ObjectCartesianPosition(X=norm_vec.x, Y=norm_vec.y,
                                              Z=norm_vec.z)
                if len(block_formats) == 0 or pos != block_formats[-1].position:
                    jp = JumpPosition(
                        flag=True, interpolationLength=Fraction(1, fps * 2))
                    block = AudioBlockFormatObjects(position=pos,
                                                    rtime=Fraction(frame, fps),
                                                    duration=Fraction(1, fps),
                                                    cartesian=True,
                                                    jumpPosition=jp)
                    block_formats.append(block)
                else:
                    block_formats[-1].duration = block_formats[-1].duration + \
                        Fraction(1, fps)
        return block_formats
    def rm_mixdown(self):
        if self._mixdown_reader is not None:
            self._mixdown_reader = None
@@ -166,12 +117,14 @@ class ObjectMixPool:
    @property
    def shortest_file_length(self):
        lengths = map(lambda f: len(f.mixdown_reader), self.object_mixes)
        lengths = map(lambda f: f.frames_length, self.object_mixes)
        return min(lengths)
def object_mixes_from_source_groups(groups: List[List[bpy.types.Object]],
scene: bpy.types.Scene, base_dir: str):
def object_mixes_from_source_groups(
groups: List[List[bpy.types.Object]],
scene: bpy.types.Scene, base_dir: str) -> list[ObjectMix]:
    mixes = []
    for group in groups:
        mixes.append(ObjectMix(sources=group, scene=scene, base_dir=base_dir))
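
The changes above replace ear's Bw64Reader with the standard-library wave module for reading mixdown properties. A minimal sketch of that approach (the file path is hypothetical):

import wave

with wave.open("/tmp/mixdown.wav", "rb") as f:  # hypothetical path
    sample_rate = f.getframerate()
    bits_per_sample = f.getsampwidth() * 8      # bytes per sample -> bits
    frames_length = f.getnframes()
print(sample_rate, bits_per_sample, frames_length)

One thing to verify: wave has historically accepted only integer PCM WAV files, so switching the mixdown call to format='F32' (IEEE float) may cause wave.open to raise wave.Error on the resulting files; if so, the mixdown format and the reader need to be reconciled.
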

View File

@@ -5,6 +5,10 @@ def all_speakers(scene: bpy.types.Scene) -> list[bpy.types.Object]:
def solo_speakers(scene: bpy.types.Scene, solo_group: list[bpy.types.Object]):
"""
Mutes all Objects not in `solo_group` and ensures all objects in this group
are not muted.
"""
    for speaker in all_speakers(scene):
        assert type(speaker.data) is bpy.types.Speaker
        if speaker in solo_group:
@@ -16,6 +20,9 @@ def solo_speakers(scene: bpy.types.Scene, solo_group: list[bpy.types.Object]):
def unmute_all_speakers(scene):
"""
Unmutes all speakers.
"""
    for speaker in all_speakers(scene):
        assert type(speaker.data) is bpy.types.Speaker
        speaker.data.muted = False
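
Together with object_mix.py above, these helpers bracket a per-object mixdown: solo a group, render, then restore. A minimal usage sketch (the group selection and output path are hypothetical, and the try/finally is an extra safeguard, not something the code above does):

import bpy
from .speaker_utils import all_speakers, solo_speakers, unmute_all_speakers  # relative import, inside this add-on

scene = bpy.context.scene
group = all_speakers(scene)[:1]  # hypothetical: solo only the first speaker
solo_speakers(scene, group)
try:
    bpy.ops.sound.mixdown(filepath="/tmp/solo.wav",  # hypothetical path
                          container='WAV', codec='PCM')
finally:
    unmute_all_speakers(scene)  # restore mute state even if the mixdown fails
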

View File

@@ -1,86 +0,0 @@
## This is copied from
## https://blender.stackexchange.com/questions/4956/convert-particle-system-to-animated-meshes?answertab=active#tab-top
#
# And needs to be adapted
import bpy
# Set these to False if you don't want to key that property.
KEYFRAME_LOCATION = True
KEYFRAME_ROTATION = True
KEYFRAME_SCALE = True
KEYFRAME_VISIBILITY = True # Viewport and render visibility.
def create_objects_for_particles(ps, obj):
    # Duplicate the given object for every particle and return the duplicates.
    # Use instances instead of full copies.
    obj_list = []
    mesh = obj.data
    particles_coll = bpy.data.collections.new(name="particles")
    bpy.context.scene.collection.children.link(particles_coll)
    for i, _ in enumerate(ps.particles):
        dupli = bpy.data.objects.new(
            name="particle.{:03d}".format(i),
            object_data=mesh)
        particles_coll.objects.link(dupli)
        obj_list.append(dupli)
    return obj_list
def match_and_keyframe_objects(ps, obj_list, start_frame, end_frame):
    # Match and keyframe the objects to the particles for every frame in the
    # given range.
    for frame in range(start_frame, end_frame + 1):
        print("frame {} processed".format(frame))
        bpy.context.scene.frame_set(frame)
        for p, obj in zip(ps.particles, obj_list):
            match_object_to_particle(p, obj)
            keyframe_obj(obj)
def match_object_to_particle(p, obj):
    # Match the location, rotation, scale and visibility of the object to
    # the particle.
    loc = p.location
    rot = p.rotation
    size = p.size
    if p.alive_state == 'ALIVE':
        vis = True
    else:
        vis = False
    obj.location = loc
    # Set rotation mode to quaternion to match particle rotation.
    obj.rotation_mode = 'QUATERNION'
    obj.rotation_quaternion = rot
    obj.scale = (size, size, size)
    obj.hide_viewport = not(vis)  # <<<-- this was called "hide" in <= 2.79
    obj.hide_render = not(vis)
def keyframe_obj(obj):
    # Keyframe location, rotation, scale and visibility if specified.
    if KEYFRAME_LOCATION:
        obj.keyframe_insert("location")
    if KEYFRAME_ROTATION:
        obj.keyframe_insert("rotation_quaternion")
    if KEYFRAME_SCALE:
        obj.keyframe_insert("scale")
    if KEYFRAME_VISIBILITY:
        obj.keyframe_insert("hide_viewport")  # <<<-- this was called "hide" in <= 2.79
        obj.keyframe_insert("hide_render")
def main():
    # In 2.8 you need to evaluate the dependency graph in order to get data
    # from animation, modifiers, etc.
    depsgraph = bpy.context.evaluated_depsgraph_get()
    # Assume only 2 objects are selected.
    # The active object should be the one with the particle system.
    ps_obj = bpy.context.object
    ps_obj_evaluated = depsgraph.objects[ps_obj.name]
    obj = [obj for obj in bpy.context.selected_objects if obj != ps_obj][0]
    ps = ps_obj_evaluated.particle_systems[0]  # Assume only 1 particle system.
    start_frame = bpy.context.scene.frame_start
    end_frame = bpy.context.scene.frame_end
    obj_list = create_objects_for_particles(ps, obj)
    match_and_keyframe_objects(ps, obj_list, start_frame, end_frame)
if __name__ == '__main__':
    main()

View File

@@ -4,14 +4,12 @@ certifi==2025.10.5
charset-normalizer==3.4.4
cython==3.2.0
decorator==5.2.1
ear==2.1.0
executing==2.2.1
fake-bpy-module-4-3==20250130
idna==3.11
ipython==9.7.0
ipython-pygments-lexers==1.1.1
jedi==0.19.2
lxml==4.9.4
mathutils==3.3.0
matplotlib-inline==0.2.1
multipledispatch==0.6.0
@@ -29,7 +27,6 @@ pygments==2.19.2
requests==2.32.5
ruamel-yaml==0.18.16
ruamel-yaml-clib==0.2.14
scipy==1.16.3
setuptools==78.1.0
six==1.17.0
stack-data==0.6.3